#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
// @generated by torchgen/gen.py from RegisterFunctionalization.cpp

#include <ATen/core/LegacyTypeDispatch.h>
#include <ATen/EmptyTensor.h>
#include <ATen/FunctionalTensorWrapper.h>
#include <ATen/FunctionalInverses.h>
#include <ATen/MemoryOverlap.h>
#include <torch/library.h>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Operators.h>
#include <ATen/NativeFunctions.h>
#else
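// With AT_PER_OPERATOR_HEADERS, only per-operator headers are pulled in instead of the
// monolithic Operators.h / NativeFunctions.h above: <op>_ops.h declares the
// at::_ops::<op> entry points the generated kernels redispatch through, and
// <op>_native.h declares the corresponding native kernels. (Descriptive note;
// the include list itself is emitted by torchgen.)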
// needed for the meta tensor calls to get stride info in functionalization
#include <ATen/ops/empty_strided_native.h>
// needed for special handling of copy_().
// See Note [functionalizating copy_() and not preserving strides]
#include <ATen/ops/to_ops.h>
#include <ATen/ops/expand_copy_ops.h>
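// Illustrative sketch only (not the exact generated code): functionalization lowers
// copy_(self, src) into out-of-place ops roughly like
//   auto tmp    = src.to(self.scalar_type());           // conversion via to_ops.h
//   auto result = at::expand_copy(tmp, self.sizes());   // broadcast via expand_copy_ops.h
// so the result typically ends up with default contiguous strides rather than
// preserving self's strides, which is what the Note above refers to.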

#include <ATen/ops/_new_zeros_with_same_feature_meta_native.h>
#include <ATen/ops/_new_zeros_with_same_feature_meta_ops.h>
#include <ATen/ops/_new_zeros_with_same_feature_meta_native.h>
#include <ATen/ops/_new_zeros_with_same_feature_meta_ops.h>
#include <ATen/ops/_cudnn_ctc_loss_native.h>
#include <ATen/ops/_cudnn_ctc_loss_ops.h>
#include <ATen/ops/_cudnn_ctc_loss_native.h>
#include <ATen/ops/_cudnn_ctc_loss_ops.h>
#include <ATen/ops/_cudnn_rnn_flatten_weight_native.h>
#include <ATen/ops/_cudnn_rnn_flatten_weight_ops.h>
#include <ATen/ops/_cudnn_rnn_flatten_weight_native.h>
#include <ATen/ops/_cudnn_rnn_flatten_weight_ops.h>
#include <ATen/ops/_cudnn_rnn_native.h>
#include <ATen/ops/_cudnn_rnn_ops.h>
#include <ATen/ops/_cudnn_rnn_native.h>
#include <ATen/ops/_cudnn_rnn_ops.h>
#include <ATen/ops/_cudnn_rnn_backward_native.h>
#include <ATen/ops/_cudnn_rnn_backward_ops.h>
#include <ATen/ops/_cudnn_rnn_backward_native.h>
#include <ATen/ops/_cudnn_rnn_backward_ops.h>
#include <ATen/ops/_cudnn_init_dropout_state_native.h>
#include <ATen/ops/_cudnn_init_dropout_state_ops.h>
#include <ATen/ops/_cudnn_init_dropout_state_native.h>
#include <ATen/ops/_cudnn_init_dropout_state_ops.h>
#include <ATen/ops/_fused_dropout_native.h>
#include <ATen/ops/_fused_dropout_ops.h>
#include <ATen/ops/_fused_dropout_native.h>
#include <ATen/ops/_fused_dropout_ops.h>
#include <ATen/ops/_masked_scale_native.h>
#include <ATen/ops/_masked_scale_ops.h>
#include <ATen/ops/_masked_scale_native.h>
#include <ATen/ops/_masked_scale_ops.h>
#include <ATen/ops/native_dropout_native.h>
#include <ATen/ops/native_dropout_ops.h>
#include <ATen/ops/native_dropout_native.h>
#include <ATen/ops/native_dropout_ops.h>
#include <ATen/ops/native_dropout_backward_native.h>
#include <ATen/ops/native_dropout_backward_ops.h>
#include <ATen/ops/native_dropout_backward_native.h>
#include <ATen/ops/native_dropout_backward_ops.h>
#include <ATen/ops/abs_native.h>
#include <ATen/ops/abs_ops.h>
#include <ATen/ops/abs_native.h>
#include <ATen/ops/abs_ops.h>
#include <ATen/ops/abs_native.h>
#include <ATen/ops/abs_ops.h>
#include <ATen/ops/absolute_native.h>
#include <ATen/ops/absolute_ops.h>
#include <ATen/ops/absolute_native.h>
#include <ATen/ops/absolute_ops.h>
#include <ATen/ops/absolute_native.h>
#include <ATen/ops/absolute_ops.h>
#include <ATen/ops/angle_native.h>
#include <ATen/ops/angle_ops.h>
#include <ATen/ops/angle_native.h>
#include <ATen/ops/angle_ops.h>
#include <ATen/ops/sgn_native.h>
#include <ATen/ops/sgn_ops.h>
#include <ATen/ops/sgn_native.h>
#include <ATen/ops/sgn_ops.h>
#include <ATen/ops/sgn_native.h>
#include <ATen/ops/sgn_ops.h>
#include <ATen/ops/_conj_physical_native.h>
#include <ATen/ops/_conj_physical_ops.h>
#include <ATen/ops/_conj_physical_native.h>
#include <ATen/ops/_conj_physical_ops.h>
#include <ATen/ops/conj_physical_native.h>
#include <ATen/ops/conj_physical_ops.h>
#include <ATen/ops/conj_physical_native.h>
#include <ATen/ops/conj_physical_ops.h>
#include <ATen/ops/conj_physical_native.h>
#include <ATen/ops/conj_physical_ops.h>
#include <ATen/ops/acos_native.h>
#include <ATen/ops/acos_ops.h>
#include <ATen/ops/acos_native.h>
#include <ATen/ops/acos_ops.h>
#include <ATen/ops/acos_native.h>
#include <ATen/ops/acos_ops.h>
#include <ATen/ops/arccos_native.h>
#include <ATen/ops/arccos_ops.h>
#include <ATen/ops/arccos_native.h>
#include <ATen/ops/arccos_ops.h>
#include <ATen/ops/arccos_native.h>
#include <ATen/ops/arccos_ops.h>
#include <ATen/ops/avg_pool1d_native.h>
#include <ATen/ops/avg_pool1d_ops.h>
#include <ATen/ops/avg_pool1d_native.h>
#include <ATen/ops/avg_pool1d_ops.h>
#include <ATen/ops/adaptive_avg_pool1d_native.h>
#include <ATen/ops/adaptive_avg_pool1d_ops.h>
#include <ATen/ops/adaptive_avg_pool1d_native.h>
#include <ATen/ops/adaptive_avg_pool1d_ops.h>
#include <ATen/ops/add_native.h>
#include <ATen/ops/add_ops.h>
#include <ATen/ops/add_native.h>
#include <ATen/ops/add_ops.h>
#include <ATen/ops/add_native.h>
#include <ATen/ops/add_ops.h>
#include <ATen/ops/_add_relu_native.h>
#include <ATen/ops/_add_relu_ops.h>
#include <ATen/ops/_add_relu_native.h>
#include <ATen/ops/_add_relu_ops.h>
#include <ATen/ops/_add_relu_native.h>
#include <ATen/ops/_add_relu_ops.h>
#include <ATen/ops/_add_relu_native.h>
#include <ATen/ops/_add_relu_ops.h>
#include <ATen/ops/_add_relu_native.h>
#include <ATen/ops/_add_relu_ops.h>
#include <ATen/ops/_add_relu_native.h>
#include <ATen/ops/_add_relu_ops.h>
#include <ATen/ops/add_native.h>
#include <ATen/ops/add_ops.h>
#include <ATen/ops/add_native.h>
#include <ATen/ops/add_ops.h>
#include <ATen/ops/add_native.h>
#include <ATen/ops/add_ops.h>
#include <ATen/ops/addmv_native.h>
#include <ATen/ops/addmv_ops.h>
#include <ATen/ops/addmv_native.h>
#include <ATen/ops/addmv_ops.h>
#include <ATen/ops/addmv_native.h>
#include <ATen/ops/addmv_ops.h>
#include <ATen/ops/addr_native.h>
#include <ATen/ops/addr_ops.h>
#include <ATen/ops/addr_native.h>
#include <ATen/ops/addr_ops.h>
#include <ATen/ops/addr_native.h>
#include <ATen/ops/addr_ops.h>
#include <ATen/ops/affine_grid_generator_native.h>
#include <ATen/ops/affine_grid_generator_ops.h>
#include <ATen/ops/affine_grid_generator_native.h>
#include <ATen/ops/affine_grid_generator_ops.h>
#include <ATen/ops/_test_functorch_fallback_native.h>
#include <ATen/ops/_test_functorch_fallback_ops.h>
#include <ATen/ops/_test_functorch_fallback_native.h>
#include <ATen/ops/_test_functorch_fallback_ops.h>
#include <ATen/ops/all_native.h>
#include <ATen/ops/all_ops.h>
#include <ATen/ops/all_native.h>
#include <ATen/ops/all_ops.h>
#include <ATen/ops/all_native.h>
#include <ATen/ops/all_ops.h>
#include <ATen/ops/all_native.h>
#include <ATen/ops/all_ops.h>
#include <ATen/ops/all_native.h>
#include <ATen/ops/all_ops.h>
#include <ATen/ops/all_native.h>
#include <ATen/ops/all_ops.h>
#include <ATen/ops/any_native.h>
#include <ATen/ops/any_ops.h>
#include <ATen/ops/any_native.h>
#include <ATen/ops/any_ops.h>
#include <ATen/ops/any_native.h>
#include <ATen/ops/any_ops.h>
#include <ATen/ops/any_native.h>
#include <ATen/ops/any_ops.h>
#include <ATen/ops/any_native.h>
#include <ATen/ops/any_ops.h>
#include <ATen/ops/any_native.h>
#include <ATen/ops/any_ops.h>
#include <ATen/ops/arange_native.h>
#include <ATen/ops/arange_ops.h>
#include <ATen/ops/arange_native.h>
#include <ATen/ops/arange_ops.h>
#include <ATen/ops/arange_native.h>
#include <ATen/ops/arange_ops.h>
#include <ATen/ops/arange_native.h>
#include <ATen/ops/arange_ops.h>
#include <ATen/ops/argmax_native.h>
#include <ATen/ops/argmax_ops.h>
#include <ATen/ops/argmax_native.h>
#include <ATen/ops/argmax_ops.h>
#include <ATen/ops/argmin_native.h>
#include <ATen/ops/argmin_ops.h>
#include <ATen/ops/argmin_native.h>
#include <ATen/ops/argmin_ops.h>
#include <ATen/ops/acosh_native.h>
#include <ATen/ops/acosh_ops.h>
#include <ATen/ops/acosh_native.h>
#include <ATen/ops/acosh_ops.h>
#include <ATen/ops/acosh_native.h>
#include <ATen/ops/acosh_ops.h>
#include <ATen/ops/arccosh_native.h>
#include <ATen/ops/arccosh_ops.h>
#include <ATen/ops/arccosh_native.h>
#include <ATen/ops/arccosh_ops.h>
#include <ATen/ops/arccosh_native.h>
#include <ATen/ops/arccosh_ops.h>
#include <ATen/ops/asinh_native.h>
#include <ATen/ops/asinh_ops.h>
#include <ATen/ops/asinh_native.h>
#include <ATen/ops/asinh_ops.h>
#include <ATen/ops/asinh_native.h>
#include <ATen/ops/asinh_ops.h>
#include <ATen/ops/arcsinh_native.h>
#include <ATen/ops/arcsinh_ops.h>
#include <ATen/ops/arcsinh_native.h>
#include <ATen/ops/arcsinh_ops.h>
#include <ATen/ops/arcsinh_native.h>
#include <ATen/ops/arcsinh_ops.h>
#include <ATen/ops/atanh_native.h>
#include <ATen/ops/atanh_ops.h>
#include <ATen/ops/atanh_native.h>
#include <ATen/ops/atanh_ops.h>
#include <ATen/ops/atanh_native.h>
#include <ATen/ops/atanh_ops.h>
#include <ATen/ops/arctanh_native.h>
#include <ATen/ops/arctanh_ops.h>
#include <ATen/ops/arctanh_native.h>
#include <ATen/ops/arctanh_ops.h>
#include <ATen/ops/arctanh_native.h>
#include <ATen/ops/arctanh_ops.h>
#include <ATen/ops/asin_native.h>
#include <ATen/ops/asin_ops.h>
#include <ATen/ops/asin_native.h>
#include <ATen/ops/asin_ops.h>
#include <ATen/ops/asin_native.h>
#include <ATen/ops/asin_ops.h>
#include <ATen/ops/arcsin_native.h>
#include <ATen/ops/arcsin_ops.h>
#include <ATen/ops/arcsin_native.h>
#include <ATen/ops/arcsin_ops.h>
#include <ATen/ops/arcsin_native.h>
#include <ATen/ops/arcsin_ops.h>
#include <ATen/ops/atan_native.h>
#include <ATen/ops/atan_ops.h>
#include <ATen/ops/atan_native.h>
#include <ATen/ops/atan_ops.h>
#include <ATen/ops/atan_native.h>
#include <ATen/ops/atan_ops.h>
#include <ATen/ops/arctan_native.h>
#include <ATen/ops/arctan_ops.h>
#include <ATen/ops/arctan_native.h>
#include <ATen/ops/arctan_ops.h>
#include <ATen/ops/arctan_native.h>
#include <ATen/ops/arctan_ops.h>
#include <ATen/ops/baddbmm_native.h>
#include <ATen/ops/baddbmm_ops.h>
#include <ATen/ops/baddbmm_native.h>
#include <ATen/ops/baddbmm_ops.h>
#include <ATen/ops/baddbmm_native.h>
#include <ATen/ops/baddbmm_ops.h>
#include <ATen/ops/bartlett_window_native.h>
#include <ATen/ops/bartlett_window_ops.h>
#include <ATen/ops/bartlett_window_native.h>
#include <ATen/ops/bartlett_window_ops.h>
#include <ATen/ops/bartlett_window_native.h>
#include <ATen/ops/bartlett_window_ops.h>
#include <ATen/ops/bartlett_window_native.h>
#include <ATen/ops/bartlett_window_ops.h>
#include <ATen/ops/quantized_batch_norm_native.h>
#include <ATen/ops/quantized_batch_norm_ops.h>
#include <ATen/ops/quantized_batch_norm_native.h>
#include <ATen/ops/quantized_batch_norm_ops.h>
#include <ATen/ops/bernoulli_native.h>
#include <ATen/ops/bernoulli_ops.h>
#include <ATen/ops/bernoulli_native.h>
#include <ATen/ops/bernoulli_ops.h>
#include <ATen/ops/bernoulli_native.h>
#include <ATen/ops/bernoulli_ops.h>
#include <ATen/ops/bernoulli_native.h>
#include <ATen/ops/bernoulli_ops.h>
#include <ATen/ops/bernoulli_native.h>
#include <ATen/ops/bernoulli_ops.h>
#include <ATen/ops/bernoulli_native.h>
#include <ATen/ops/bernoulli_ops.h>
#include <ATen/ops/bernoulli_native.h>
#include <ATen/ops/bernoulli_ops.h>
#include <ATen/ops/bernoulli_native.h>
#include <ATen/ops/bernoulli_ops.h>
#include <ATen/ops/binary_cross_entropy_native.h>
#include <ATen/ops/binary_cross_entropy_ops.h>
#include <ATen/ops/binary_cross_entropy_native.h>
#include <ATen/ops/binary_cross_entropy_ops.h>
#include <ATen/ops/binary_cross_entropy_backward_native.h>
#include <ATen/ops/binary_cross_entropy_backward_ops.h>
#include <ATen/ops/binary_cross_entropy_backward_native.h>
#include <ATen/ops/binary_cross_entropy_backward_ops.h>
#include <ATen/ops/binary_cross_entropy_with_logits_native.h>
#include <ATen/ops/binary_cross_entropy_with_logits_ops.h>
#include <ATen/ops/binary_cross_entropy_with_logits_native.h>
#include <ATen/ops/binary_cross_entropy_with_logits_ops.h>
#include <ATen/ops/bincount_native.h>
#include <ATen/ops/bincount_ops.h>
#include <ATen/ops/bincount_native.h>
#include <ATen/ops/bincount_ops.h>
#include <ATen/ops/bitwise_not_native.h>
#include <ATen/ops/bitwise_not_ops.h>
#include <ATen/ops/bitwise_not_native.h>
#include <ATen/ops/bitwise_not_ops.h>
#include <ATen/ops/bitwise_not_native.h>
#include <ATen/ops/bitwise_not_ops.h>
#include <ATen/ops/copysign_native.h>
#include <ATen/ops/copysign_ops.h>
#include <ATen/ops/copysign_native.h>
#include <ATen/ops/copysign_ops.h>
#include <ATen/ops/copysign_native.h>
#include <ATen/ops/copysign_ops.h>
#include <ATen/ops/copysign_native.h>
#include <ATen/ops/copysign_ops.h>
#include <ATen/ops/copysign_native.h>
#include <ATen/ops/copysign_ops.h>
#include <ATen/ops/copysign_native.h>
#include <ATen/ops/copysign_ops.h>
#include <ATen/ops/logical_not_native.h>
#include <ATen/ops/logical_not_ops.h>
#include <ATen/ops/logical_not_native.h>
#include <ATen/ops/logical_not_ops.h>
#include <ATen/ops/logical_not_native.h>
#include <ATen/ops/logical_not_ops.h>
#include <ATen/ops/logical_xor_native.h>
#include <ATen/ops/logical_xor_ops.h>
#include <ATen/ops/logical_xor_native.h>
#include <ATen/ops/logical_xor_ops.h>
#include <ATen/ops/logical_xor_native.h>
#include <ATen/ops/logical_xor_ops.h>
#include <ATen/ops/logical_and_native.h>
#include <ATen/ops/logical_and_ops.h>
#include <ATen/ops/logical_and_native.h>
#include <ATen/ops/logical_and_ops.h>
#include <ATen/ops/logical_and_native.h>
#include <ATen/ops/logical_and_ops.h>
#include <ATen/ops/logical_or_native.h>
#include <ATen/ops/logical_or_ops.h>
#include <ATen/ops/logical_or_native.h>
#include <ATen/ops/logical_or_ops.h>
#include <ATen/ops/logical_or_native.h>
#include <ATen/ops/logical_or_ops.h>
#include <ATen/ops/blackman_window_native.h>
#include <ATen/ops/blackman_window_ops.h>
#include <ATen/ops/blackman_window_native.h>
#include <ATen/ops/blackman_window_ops.h>
#include <ATen/ops/blackman_window_native.h>
#include <ATen/ops/blackman_window_ops.h>
#include <ATen/ops/blackman_window_native.h>
#include <ATen/ops/blackman_window_ops.h>
#include <ATen/ops/bmm_native.h>
#include <ATen/ops/bmm_ops.h>
#include <ATen/ops/bmm_native.h>
#include <ATen/ops/bmm_ops.h>
#include <ATen/ops/cat_native.h>
#include <ATen/ops/cat_ops.h>
#include <ATen/ops/cat_native.h>
#include <ATen/ops/cat_ops.h>
#include <ATen/ops/cat_native.h>
#include <ATen/ops/cat_ops.h>
#include <ATen/ops/cat_native.h>
#include <ATen/ops/cat_ops.h>
#include <ATen/ops/concat_native.h>
#include <ATen/ops/concat_ops.h>
#include <ATen/ops/concat_native.h>
#include <ATen/ops/concat_ops.h>
#include <ATen/ops/concat_native.h>
#include <ATen/ops/concat_ops.h>
#include <ATen/ops/concat_native.h>
#include <ATen/ops/concat_ops.h>
#include <ATen/ops/concatenate_native.h>
#include <ATen/ops/concatenate_ops.h>
#include <ATen/ops/concatenate_native.h>
#include <ATen/ops/concatenate_ops.h>
#include <ATen/ops/concatenate_native.h>
#include <ATen/ops/concatenate_ops.h>
#include <ATen/ops/concatenate_native.h>
#include <ATen/ops/concatenate_ops.h>
#include <ATen/ops/block_diag_native.h>
#include <ATen/ops/block_diag_ops.h>
#include <ATen/ops/block_diag_native.h>
#include <ATen/ops/block_diag_ops.h>
#include <ATen/ops/ceil_native.h>
#include <ATen/ops/ceil_ops.h>
#include <ATen/ops/ceil_native.h>
#include <ATen/ops/ceil_ops.h>
#include <ATen/ops/ceil_native.h>
#include <ATen/ops/ceil_ops.h>
#include <ATen/ops/chain_matmul_native.h>
#include <ATen/ops/chain_matmul_ops.h>
#include <ATen/ops/chain_matmul_native.h>
#include <ATen/ops/chain_matmul_ops.h>
#include <ATen/ops/clamp_native.h>
#include <ATen/ops/clamp_ops.h>
#include <ATen/ops/clamp_native.h>
#include <ATen/ops/clamp_ops.h>
#include <ATen/ops/clamp_native.h>
#include <ATen/ops/clamp_ops.h>
#include <ATen/ops/clamp_native.h>
#include <ATen/ops/clamp_ops.h>
#include <ATen/ops/clamp_native.h>
#include <ATen/ops/clamp_ops.h>
#include <ATen/ops/clamp_native.h>
#include <ATen/ops/clamp_ops.h>
#include <ATen/ops/clamp_max_native.h>
#include <ATen/ops/clamp_max_ops.h>
#include <ATen/ops/clamp_max_native.h>
#include <ATen/ops/clamp_max_ops.h>
#include <ATen/ops/clamp_max_native.h>
#include <ATen/ops/clamp_max_ops.h>
#include <ATen/ops/clamp_max_native.h>
#include <ATen/ops/clamp_max_ops.h>
#include <ATen/ops/clamp_max_native.h>
#include <ATen/ops/clamp_max_ops.h>
#include <ATen/ops/clamp_max_native.h>
#include <ATen/ops/clamp_max_ops.h>
#include <ATen/ops/clamp_min_native.h>
#include <ATen/ops/clamp_min_ops.h>
#include <ATen/ops/clamp_min_native.h>
#include <ATen/ops/clamp_min_ops.h>
#include <ATen/ops/clamp_min_native.h>
#include <ATen/ops/clamp_min_ops.h>
#include <ATen/ops/clamp_min_native.h>
#include <ATen/ops/clamp_min_ops.h>
#include <ATen/ops/clamp_min_native.h>
#include <ATen/ops/clamp_min_ops.h>
#include <ATen/ops/clamp_min_native.h>
#include <ATen/ops/clamp_min_ops.h>
#include <ATen/ops/clip_native.h>
#include <ATen/ops/clip_ops.h>
#include <ATen/ops/clip_native.h>
#include <ATen/ops/clip_ops.h>
#include <ATen/ops/clip_native.h>
#include <ATen/ops/clip_ops.h>
#include <ATen/ops/clip_native.h>
#include <ATen/ops/clip_ops.h>
#include <ATen/ops/clip_native.h>
#include <ATen/ops/clip_ops.h>
#include <ATen/ops/clip_native.h>
#include <ATen/ops/clip_ops.h>
#include <ATen/ops/complex_native.h>
#include <ATen/ops/complex_ops.h>
#include <ATen/ops/complex_native.h>
#include <ATen/ops/complex_ops.h>
#include <ATen/ops/polar_native.h>
#include <ATen/ops/polar_ops.h>
#include <ATen/ops/polar_native.h>
#include <ATen/ops/polar_ops.h>
#include <ATen/ops/constant_pad_nd_native.h>
#include <ATen/ops/constant_pad_nd_ops.h>
#include <ATen/ops/constant_pad_nd_native.h>
#include <ATen/ops/constant_pad_nd_ops.h>
#include <ATen/ops/convolution_native.h>
#include <ATen/ops/convolution_ops.h>
#include <ATen/ops/convolution_native.h>
#include <ATen/ops/convolution_ops.h>
#include <ATen/ops/convolution_backward_native.h>
#include <ATen/ops/convolution_backward_ops.h>
#include <ATen/ops/convolution_backward_native.h>
#include <ATen/ops/convolution_backward_ops.h>
#include <ATen/ops/convolution_overrideable_native.h>
#include <ATen/ops/convolution_overrideable_ops.h>
#include <ATen/ops/convolution_overrideable_native.h>
#include <ATen/ops/convolution_overrideable_ops.h>
#include <ATen/ops/convolution_backward_overrideable_native.h>
#include <ATen/ops/convolution_backward_overrideable_ops.h>
#include <ATen/ops/convolution_backward_overrideable_native.h>
#include <ATen/ops/convolution_backward_overrideable_ops.h>
#include <ATen/ops/_convolution_native.h>
#include <ATen/ops/_convolution_ops.h>
#include <ATen/ops/_convolution_native.h>
#include <ATen/ops/_convolution_ops.h>
#include <ATen/ops/conv_tbc_native.h>
#include <ATen/ops/conv_tbc_ops.h>
#include <ATen/ops/conv_tbc_native.h>
#include <ATen/ops/conv_tbc_ops.h>
#include <ATen/ops/copy_native.h>
#include <ATen/ops/copy_ops.h>
#include <ATen/ops/copy_native.h>
#include <ATen/ops/copy_ops.h>
#include <ATen/ops/copy_native.h>
#include <ATen/ops/copy_ops.h>
#include <ATen/ops/_copy_from_native.h>
#include <ATen/ops/_copy_from_ops.h>
#include <ATen/ops/_copy_from_native.h>
#include <ATen/ops/_copy_from_ops.h>
#include <ATen/ops/_copy_from_and_resize_native.h>
#include <ATen/ops/_copy_from_and_resize_ops.h>
#include <ATen/ops/_copy_from_and_resize_native.h>
#include <ATen/ops/_copy_from_and_resize_ops.h>
#include <ATen/ops/cos_native.h>
#include <ATen/ops/cos_ops.h>
#include <ATen/ops/cos_native.h>
#include <ATen/ops/cos_ops.h>
#include <ATen/ops/cos_native.h>
#include <ATen/ops/cos_ops.h>
#include <ATen/ops/cosh_native.h>
#include <ATen/ops/cosh_ops.h>
#include <ATen/ops/cosh_native.h>
#include <ATen/ops/cosh_ops.h>
#include <ATen/ops/cosh_native.h>
#include <ATen/ops/cosh_ops.h>
#include <ATen/ops/count_nonzero_native.h>
#include <ATen/ops/count_nonzero_ops.h>
#include <ATen/ops/count_nonzero_native.h>
#include <ATen/ops/count_nonzero_ops.h>
#include <ATen/ops/count_nonzero_native.h>
#include <ATen/ops/count_nonzero_ops.h>
#include <ATen/ops/count_nonzero_native.h>
#include <ATen/ops/count_nonzero_ops.h>
#include <ATen/ops/cudnn_affine_grid_generator_native.h>
#include <ATen/ops/cudnn_affine_grid_generator_ops.h>
#include <ATen/ops/cudnn_affine_grid_generator_native.h>
#include <ATen/ops/cudnn_affine_grid_generator_ops.h>
#include <ATen/ops/cudnn_affine_grid_generator_backward_native.h>
#include <ATen/ops/cudnn_affine_grid_generator_backward_ops.h>
#include <ATen/ops/cudnn_affine_grid_generator_backward_native.h>
#include <ATen/ops/cudnn_affine_grid_generator_backward_ops.h>
#include <ATen/ops/cudnn_batch_norm_native.h>
#include <ATen/ops/cudnn_batch_norm_ops.h>
#include <ATen/ops/cudnn_batch_norm_native.h>
#include <ATen/ops/cudnn_batch_norm_ops.h>
#include <ATen/ops/cudnn_batch_norm_backward_native.h>
#include <ATen/ops/cudnn_batch_norm_backward_ops.h>
#include <ATen/ops/cudnn_batch_norm_backward_native.h>
#include <ATen/ops/cudnn_batch_norm_backward_ops.h>
#include <ATen/ops/cudnn_convolution_native.h>
#include <ATen/ops/cudnn_convolution_ops.h>
#include <ATen/ops/cudnn_convolution_native.h>
#include <ATen/ops/cudnn_convolution_ops.h>
#include <ATen/ops/cudnn_convolution_transpose_native.h>
#include <ATen/ops/cudnn_convolution_transpose_ops.h>
#include <ATen/ops/cudnn_convolution_transpose_native.h>
#include <ATen/ops/cudnn_convolution_transpose_ops.h>
#include <ATen/ops/_mps_convolution_transpose_native.h>
#include <ATen/ops/_mps_convolution_transpose_ops.h>
#include <ATen/ops/_mps_convolution_transpose_native.h>
#include <ATen/ops/_mps_convolution_transpose_ops.h>
#include <ATen/ops/mps_convolution_transpose_backward_native.h>
#include <ATen/ops/mps_convolution_transpose_backward_ops.h>
#include <ATen/ops/mps_convolution_transpose_backward_native.h>
#include <ATen/ops/mps_convolution_transpose_backward_ops.h>
#include <ATen/ops/cudnn_convolution_relu_native.h>
#include <ATen/ops/cudnn_convolution_relu_ops.h>
#include <ATen/ops/cudnn_convolution_relu_native.h>
#include <ATen/ops/cudnn_convolution_relu_ops.h>
#include <ATen/ops/cudnn_convolution_add_relu_native.h>
#include <ATen/ops/cudnn_convolution_add_relu_ops.h>
#include <ATen/ops/cudnn_convolution_add_relu_native.h>
#include <ATen/ops/cudnn_convolution_add_relu_ops.h>
#include <ATen/ops/cudnn_grid_sampler_native.h>
#include <ATen/ops/cudnn_grid_sampler_ops.h>
#include <ATen/ops/cudnn_grid_sampler_native.h>
#include <ATen/ops/cudnn_grid_sampler_ops.h>
#include <ATen/ops/cudnn_grid_sampler_backward_native.h>
#include <ATen/ops/cudnn_grid_sampler_backward_ops.h>
#include <ATen/ops/cudnn_grid_sampler_backward_native.h>
#include <ATen/ops/cudnn_grid_sampler_backward_ops.h>
#include <ATen/ops/cummax_native.h>
#include <ATen/ops/cummax_ops.h>
#include <ATen/ops/cummax_native.h>
#include <ATen/ops/cummax_ops.h>
#include <ATen/ops/cummax_native.h>
#include <ATen/ops/cummax_ops.h>
#include <ATen/ops/cummax_native.h>
#include <ATen/ops/cummax_ops.h>
#include <ATen/ops/cummin_native.h>
#include <ATen/ops/cummin_ops.h>
#include <ATen/ops/cummin_native.h>
#include <ATen/ops/cummin_ops.h>
#include <ATen/ops/cummin_native.h>
#include <ATen/ops/cummin_ops.h>
#include <ATen/ops/cummin_native.h>
#include <ATen/ops/cummin_ops.h>
#include <ATen/ops/cumprod_native.h>
#include <ATen/ops/cumprod_ops.h>
#include <ATen/ops/cumprod_native.h>
#include <ATen/ops/cumprod_ops.h>
#include <ATen/ops/cumprod_native.h>
#include <ATen/ops/cumprod_ops.h>
#include <ATen/ops/cumprod_native.h>
#include <ATen/ops/cumprod_ops.h>
#include <ATen/ops/cumprod_native.h>
#include <ATen/ops/cumprod_ops.h>
#include <ATen/ops/cumprod_native.h>
#include <ATen/ops/cumprod_ops.h>
#include <ATen/ops/cumsum_native.h>
#include <ATen/ops/cumsum_ops.h>
#include <ATen/ops/cumsum_native.h>
#include <ATen/ops/cumsum_ops.h>
#include <ATen/ops/cumsum_native.h>
#include <ATen/ops/cumsum_ops.h>
#include <ATen/ops/cumsum_native.h>
#include <ATen/ops/cumsum_ops.h>
#include <ATen/ops/cumsum_native.h>
#include <ATen/ops/cumsum_ops.h>
#include <ATen/ops/cumsum_native.h>
#include <ATen/ops/cumsum_ops.h>
#include <ATen/ops/_ctc_loss_native.h>
#include <ATen/ops/_ctc_loss_ops.h>
#include <ATen/ops/_ctc_loss_native.h>
#include <ATen/ops/_ctc_loss_ops.h>
#include <ATen/ops/_ctc_loss_native.h>
#include <ATen/ops/_ctc_loss_ops.h>
#include <ATen/ops/_ctc_loss_native.h>
#include <ATen/ops/_ctc_loss_ops.h>
#include <ATen/ops/_ctc_loss_backward_native.h>
#include <ATen/ops/_ctc_loss_backward_ops.h>
#include <ATen/ops/_ctc_loss_backward_native.h>
#include <ATen/ops/_ctc_loss_backward_ops.h>
#include <ATen/ops/diag_embed_native.h>
#include <ATen/ops/diag_embed_ops.h>
#include <ATen/ops/diag_embed_native.h>
#include <ATen/ops/diag_embed_ops.h>
#include <ATen/ops/diagonal_backward_native.h>
#include <ATen/ops/diagonal_backward_ops.h>
#include <ATen/ops/diagonal_backward_native.h>
#include <ATen/ops/diagonal_backward_ops.h>
#include <ATen/ops/diff_native.h>
#include <ATen/ops/diff_ops.h>
#include <ATen/ops/diff_native.h>
#include <ATen/ops/diff_ops.h>
#include <ATen/ops/div_native.h>
#include <ATen/ops/div_ops.h>
#include <ATen/ops/div_native.h>
#include <ATen/ops/div_ops.h>
#include <ATen/ops/div_native.h>
#include <ATen/ops/div_ops.h>
#include <ATen/ops/div_native.h>
#include <ATen/ops/div_ops.h>
#include <ATen/ops/div_native.h>
#include <ATen/ops/div_ops.h>
#include <ATen/ops/div_native.h>
#include <ATen/ops/div_ops.h>
#include <ATen/ops/div_native.h>
#include <ATen/ops/div_ops.h>
#include <ATen/ops/div_native.h>
#include <ATen/ops/div_ops.h>
#include <ATen/ops/div_native.h>
#include <ATen/ops/div_ops.h>
#include <ATen/ops/div_native.h>
#include <ATen/ops/div_ops.h>
#include <ATen/ops/div_native.h>
#include <ATen/ops/div_ops.h>
#include <ATen/ops/div_native.h>
#include <ATen/ops/div_ops.h>
#include <ATen/ops/divide_native.h>
#include <ATen/ops/divide_ops.h>
#include <ATen/ops/divide_native.h>
#include <ATen/ops/divide_ops.h>
#include <ATen/ops/divide_native.h>
#include <ATen/ops/divide_ops.h>
#include <ATen/ops/divide_native.h>
#include <ATen/ops/divide_ops.h>
#include <ATen/ops/divide_native.h>
#include <ATen/ops/divide_ops.h>
#include <ATen/ops/divide_native.h>
#include <ATen/ops/divide_ops.h>
#include <ATen/ops/true_divide_native.h>
#include <ATen/ops/true_divide_ops.h>
#include <ATen/ops/true_divide_native.h>
#include <ATen/ops/true_divide_ops.h>
#include <ATen/ops/true_divide_native.h>
#include <ATen/ops/true_divide_ops.h>
#include <ATen/ops/dot_native.h>
#include <ATen/ops/dot_ops.h>
#include <ATen/ops/dot_native.h>
#include <ATen/ops/dot_ops.h>
#include <ATen/ops/vdot_native.h>
#include <ATen/ops/vdot_ops.h>
#include <ATen/ops/vdot_native.h>
#include <ATen/ops/vdot_ops.h>
#include <ATen/ops/embedding_native.h>
#include <ATen/ops/embedding_ops.h>
#include <ATen/ops/embedding_native.h>
#include <ATen/ops/embedding_ops.h>
#include <ATen/ops/embedding_dense_backward_native.h>
#include <ATen/ops/embedding_dense_backward_ops.h>
#include <ATen/ops/embedding_dense_backward_native.h>
#include <ATen/ops/embedding_dense_backward_ops.h>
#include <ATen/ops/embedding_renorm_native.h>
#include <ATen/ops/embedding_renorm_ops.h>
#include <ATen/ops/embedding_renorm_native.h>
#include <ATen/ops/embedding_renorm_ops.h>
#include <ATen/ops/embedding_renorm_native.h>
#include <ATen/ops/embedding_renorm_ops.h>
#include <ATen/ops/_embedding_bag_forward_only_native.h>
#include <ATen/ops/_embedding_bag_forward_only_ops.h>
#include <ATen/ops/_embedding_bag_forward_only_native.h>
#include <ATen/ops/_embedding_bag_forward_only_ops.h>
#include <ATen/ops/row_stack_native.h>
#include <ATen/ops/row_stack_ops.h>
#include <ATen/ops/row_stack_native.h>
#include <ATen/ops/row_stack_ops.h>
#include <ATen/ops/_embedding_bag_native.h>
#include <ATen/ops/_embedding_bag_ops.h>
#include <ATen/ops/_embedding_bag_native.h>
#include <ATen/ops/_embedding_bag_ops.h>
#include <ATen/ops/_embedding_bag_dense_backward_native.h>
#include <ATen/ops/_embedding_bag_dense_backward_ops.h>
#include <ATen/ops/_embedding_bag_dense_backward_native.h>
#include <ATen/ops/_embedding_bag_dense_backward_ops.h>
#include <ATen/ops/_embedding_bag_per_sample_weights_backward_native.h>
#include <ATen/ops/_embedding_bag_per_sample_weights_backward_ops.h>
#include <ATen/ops/_embedding_bag_per_sample_weights_backward_native.h>
#include <ATen/ops/_embedding_bag_per_sample_weights_backward_ops.h>
#include <ATen/ops/empty_native.h>
#include <ATen/ops/empty_ops.h>
#include <ATen/ops/empty_native.h>
#include <ATen/ops/empty_ops.h>
#include <ATen/ops/empty_native.h>
#include <ATen/ops/empty_ops.h>
#include <ATen/ops/empty_native.h>
#include <ATen/ops/empty_ops.h>
#include <ATen/ops/empty_permuted_native.h>
#include <ATen/ops/empty_permuted_ops.h>
#include <ATen/ops/empty_permuted_native.h>
#include <ATen/ops/empty_permuted_ops.h>
#include <ATen/ops/new_empty_native.h>
#include <ATen/ops/new_empty_ops.h>
#include <ATen/ops/new_empty_native.h>
#include <ATen/ops/new_empty_ops.h>
#include <ATen/ops/new_empty_strided_native.h>
#include <ATen/ops/new_empty_strided_ops.h>
#include <ATen/ops/new_empty_strided_native.h>
#include <ATen/ops/new_empty_strided_ops.h>
#include <ATen/ops/new_full_native.h>
#include <ATen/ops/new_full_ops.h>
#include <ATen/ops/new_full_native.h>
#include <ATen/ops/new_full_ops.h>
#include <ATen/ops/new_zeros_native.h>
#include <ATen/ops/new_zeros_ops.h>
#include <ATen/ops/new_zeros_native.h>
#include <ATen/ops/new_zeros_ops.h>
#include <ATen/ops/new_ones_native.h>
#include <ATen/ops/new_ones_ops.h>
#include <ATen/ops/new_ones_native.h>
#include <ATen/ops/new_ones_ops.h>
#include <ATen/ops/_empty_affine_quantized_native.h>
#include <ATen/ops/_empty_affine_quantized_ops.h>
#include <ATen/ops/_empty_affine_quantized_native.h>
#include <ATen/ops/_empty_affine_quantized_ops.h>
#include <ATen/ops/_empty_per_channel_affine_quantized_native.h>
#include <ATen/ops/_empty_per_channel_affine_quantized_ops.h>
#include <ATen/ops/_empty_per_channel_affine_quantized_native.h>
#include <ATen/ops/_empty_per_channel_affine_quantized_ops.h>
#include <ATen/ops/resize_native.h>
#include <ATen/ops/resize_ops.h>
#include <ATen/ops/resize_native.h>
#include <ATen/ops/resize_ops.h>
#include <ATen/ops/resize_native.h>
#include <ATen/ops/resize_ops.h>
#include <ATen/ops/_resize_output_native.h>
#include <ATen/ops/_resize_output_ops.h>
#include <ATen/ops/_resize_output_native.h>
#include <ATen/ops/_resize_output_ops.h>
#include <ATen/ops/_resize_output_native.h>
#include <ATen/ops/_resize_output_ops.h>
#include <ATen/ops/empty_quantized_native.h>
#include <ATen/ops/empty_quantized_ops.h>
#include <ATen/ops/empty_quantized_native.h>
#include <ATen/ops/empty_quantized_ops.h>
#include <ATen/ops/empty_like_native.h>
#include <ATen/ops/empty_like_ops.h>
#include <ATen/ops/empty_like_native.h>
#include <ATen/ops/empty_like_ops.h>
#include <ATen/ops/empty_strided_native.h>
#include <ATen/ops/empty_strided_ops.h>
#include <ATen/ops/empty_strided_native.h>
#include <ATen/ops/empty_strided_ops.h>
#include <ATen/ops/erf_native.h>
#include <ATen/ops/erf_ops.h>
#include <ATen/ops/erf_native.h>
#include <ATen/ops/erf_ops.h>
#include <ATen/ops/erf_native.h>
#include <ATen/ops/erf_ops.h>
#include <ATen/ops/erfc_native.h>
#include <ATen/ops/erfc_ops.h>
#include <ATen/ops/erfc_native.h>
#include <ATen/ops/erfc_ops.h>
#include <ATen/ops/erfc_native.h>
#include <ATen/ops/erfc_ops.h>
#include <ATen/ops/exp_native.h>
#include <ATen/ops/exp_ops.h>
#include <ATen/ops/exp_native.h>
#include <ATen/ops/exp_ops.h>
#include <ATen/ops/exp_native.h>
#include <ATen/ops/exp_ops.h>
#include <ATen/ops/exp2_native.h>
#include <ATen/ops/exp2_ops.h>
#include <ATen/ops/exp2_native.h>
#include <ATen/ops/exp2_ops.h>
#include <ATen/ops/exp2_native.h>
#include <ATen/ops/exp2_ops.h>
#include <ATen/ops/expm1_native.h>
#include <ATen/ops/expm1_ops.h>
#include <ATen/ops/expm1_native.h>
#include <ATen/ops/expm1_ops.h>
#include <ATen/ops/expm1_native.h>
#include <ATen/ops/expm1_ops.h>
#include <ATen/ops/eye_native.h>
#include <ATen/ops/eye_ops.h>
#include <ATen/ops/eye_native.h>
#include <ATen/ops/eye_ops.h>
#include <ATen/ops/eye_native.h>
#include <ATen/ops/eye_ops.h>
#include <ATen/ops/eye_native.h>
#include <ATen/ops/eye_ops.h>
#include <ATen/ops/fill_native.h>
#include <ATen/ops/fill_ops.h>
#include <ATen/ops/fill_native.h>
#include <ATen/ops/fill_ops.h>
#include <ATen/ops/fill_native.h>
#include <ATen/ops/fill_ops.h>
#include <ATen/ops/fill_native.h>
#include <ATen/ops/fill_ops.h>
#include <ATen/ops/fill_native.h>
#include <ATen/ops/fill_ops.h>
#include <ATen/ops/fill_native.h>
#include <ATen/ops/fill_ops.h>
#include <ATen/ops/floor_native.h>
#include <ATen/ops/floor_ops.h>
#include <ATen/ops/floor_native.h>
#include <ATen/ops/floor_ops.h>
#include <ATen/ops/floor_native.h>
#include <ATen/ops/floor_ops.h>
#include <ATen/ops/floor_divide_native.h>
#include <ATen/ops/floor_divide_ops.h>
#include <ATen/ops/floor_divide_native.h>
#include <ATen/ops/floor_divide_ops.h>
#include <ATen/ops/floor_divide_native.h>
#include <ATen/ops/floor_divide_ops.h>
#include <ATen/ops/floor_divide_native.h>
#include <ATen/ops/floor_divide_ops.h>
#include <ATen/ops/floor_divide_native.h>
#include <ATen/ops/floor_divide_ops.h>
#include <ATen/ops/floor_divide_native.h>
#include <ATen/ops/floor_divide_ops.h>
#include <ATen/ops/frac_native.h>
#include <ATen/ops/frac_ops.h>
#include <ATen/ops/frac_native.h>
#include <ATen/ops/frac_ops.h>
#include <ATen/ops/frac_native.h>
#include <ATen/ops/frac_ops.h>
#include <ATen/ops/full_native.h>
#include <ATen/ops/full_ops.h>
#include <ATen/ops/full_native.h>
#include <ATen/ops/full_ops.h>
#include <ATen/ops/full_native.h>
#include <ATen/ops/full_ops.h>
#include <ATen/ops/full_native.h>
#include <ATen/ops/full_ops.h>
#include <ATen/ops/full_like_native.h>
#include <ATen/ops/full_like_ops.h>
#include <ATen/ops/full_like_native.h>
#include <ATen/ops/full_like_ops.h>
#include <ATen/ops/from_file_native.h>
#include <ATen/ops/from_file_ops.h>
#include <ATen/ops/from_file_native.h>
#include <ATen/ops/from_file_ops.h>
#include <ATen/ops/gcd_native.h>
#include <ATen/ops/gcd_ops.h>
#include <ATen/ops/gcd_native.h>
#include <ATen/ops/gcd_ops.h>
#include <ATen/ops/gcd_native.h>
#include <ATen/ops/gcd_ops.h>
#include <ATen/ops/lcm_native.h>
#include <ATen/ops/lcm_ops.h>
#include <ATen/ops/lcm_native.h>
#include <ATen/ops/lcm_ops.h>
#include <ATen/ops/lcm_native.h>
#include <ATen/ops/lcm_ops.h>
#include <ATen/ops/grid_sampler_2d_native.h>
#include <ATen/ops/grid_sampler_2d_ops.h>
#include <ATen/ops/grid_sampler_2d_native.h>
#include <ATen/ops/grid_sampler_2d_ops.h>
#include <ATen/ops/grid_sampler_2d_backward_native.h>
#include <ATen/ops/grid_sampler_2d_backward_ops.h>
#include <ATen/ops/grid_sampler_2d_backward_native.h>
#include <ATen/ops/grid_sampler_2d_backward_ops.h>
#include <ATen/ops/_grid_sampler_2d_cpu_fallback_native.h>
#include <ATen/ops/_grid_sampler_2d_cpu_fallback_ops.h>
#include <ATen/ops/_grid_sampler_2d_cpu_fallback_native.h>
#include <ATen/ops/_grid_sampler_2d_cpu_fallback_ops.h>
#include <ATen/ops/grid_sampler_3d_native.h>
#include <ATen/ops/grid_sampler_3d_ops.h>
#include <ATen/ops/grid_sampler_3d_native.h>
#include <ATen/ops/grid_sampler_3d_ops.h>
#include <ATen/ops/grid_sampler_3d_backward_native.h>
#include <ATen/ops/grid_sampler_3d_backward_ops.h>
#include <ATen/ops/grid_sampler_3d_backward_native.h>
#include <ATen/ops/grid_sampler_3d_backward_ops.h>
#include <ATen/ops/hann_window_native.h>
#include <ATen/ops/hann_window_ops.h>
#include <ATen/ops/hann_window_native.h>
#include <ATen/ops/hann_window_ops.h>
#include <ATen/ops/hann_window_native.h>
#include <ATen/ops/hann_window_ops.h>
#include <ATen/ops/hann_window_native.h>
#include <ATen/ops/hann_window_ops.h>
#include <ATen/ops/hamming_window_native.h>
#include <ATen/ops/hamming_window_ops.h>
#include <ATen/ops/hamming_window_native.h>
#include <ATen/ops/hamming_window_ops.h>
#include <ATen/ops/hamming_window_native.h>
#include <ATen/ops/hamming_window_ops.h>
#include <ATen/ops/hamming_window_native.h>
#include <ATen/ops/hamming_window_ops.h>
#include <ATen/ops/hamming_window_native.h>
#include <ATen/ops/hamming_window_ops.h>
#include <ATen/ops/hamming_window_native.h>
#include <ATen/ops/hamming_window_ops.h>
#include <ATen/ops/hamming_window_native.h>
#include <ATen/ops/hamming_window_ops.h>
#include <ATen/ops/hamming_window_native.h>
#include <ATen/ops/hamming_window_ops.h>
#include <ATen/ops/kaiser_window_native.h>
#include <ATen/ops/kaiser_window_ops.h>
#include <ATen/ops/kaiser_window_native.h>
#include <ATen/ops/kaiser_window_ops.h>
#include <ATen/ops/kaiser_window_native.h>
#include <ATen/ops/kaiser_window_ops.h>
#include <ATen/ops/kaiser_window_native.h>
#include <ATen/ops/kaiser_window_ops.h>
#include <ATen/ops/kaiser_window_native.h>
#include <ATen/ops/kaiser_window_ops.h>
#include <ATen/ops/kaiser_window_native.h>
#include <ATen/ops/kaiser_window_ops.h>
#include <ATen/ops/native_group_norm_native.h>
#include <ATen/ops/native_group_norm_ops.h>
#include <ATen/ops/native_group_norm_native.h>
#include <ATen/ops/native_group_norm_ops.h>
#include <ATen/ops/native_group_norm_backward_native.h>
#include <ATen/ops/native_group_norm_backward_ops.h>
#include <ATen/ops/native_group_norm_backward_native.h>
#include <ATen/ops/native_group_norm_backward_ops.h>
#include <ATen/ops/_fft_r2c_native.h>
#include <ATen/ops/_fft_r2c_ops.h>
#include <ATen/ops/_fft_r2c_native.h>
#include <ATen/ops/_fft_r2c_ops.h>
#include <ATen/ops/_fft_c2r_native.h>
#include <ATen/ops/_fft_c2r_ops.h>
#include <ATen/ops/_fft_c2r_native.h>
#include <ATen/ops/_fft_c2r_ops.h>
#include <ATen/ops/_fft_c2c_native.h>
#include <ATen/ops/_fft_c2c_ops.h>
#include <ATen/ops/_fft_c2c_native.h>
#include <ATen/ops/_fft_c2c_ops.h>
#include <ATen/ops/index_native.h>
#include <ATen/ops/index_ops.h>
#include <ATen/ops/index_native.h>
#include <ATen/ops/index_ops.h>
#include <ATen/ops/index_copy_native.h>
#include <ATen/ops/index_copy_ops.h>
#include <ATen/ops/index_copy_native.h>
#include <ATen/ops/index_copy_ops.h>
#include <ATen/ops/index_copy_native.h>
#include <ATen/ops/index_copy_ops.h>
#include <ATen/ops/index_put_native.h>
#include <ATen/ops/index_put_ops.h>
#include <ATen/ops/index_put_native.h>
#include <ATen/ops/index_put_ops.h>
#include <ATen/ops/index_put_native.h>
#include <ATen/ops/index_put_ops.h>
#include <ATen/ops/_index_put_impl_native.h>
#include <ATen/ops/_index_put_impl_ops.h>
#include <ATen/ops/_index_put_impl_native.h>
#include <ATen/ops/_index_put_impl_ops.h>
#include <ATen/ops/_index_put_impl_native.h>
#include <ATen/ops/_index_put_impl_ops.h>
#include <ATen/ops/isin_native.h>
#include <ATen/ops/isin_ops.h>
#include <ATen/ops/isin_native.h>
#include <ATen/ops/isin_ops.h>
#include <ATen/ops/isin_native.h>
#include <ATen/ops/isin_ops.h>
#include <ATen/ops/isin_native.h>
#include <ATen/ops/isin_ops.h>
#include <ATen/ops/isin_native.h>
#include <ATen/ops/isin_ops.h>
#include <ATen/ops/isin_native.h>
#include <ATen/ops/isin_ops.h>
#include <ATen/ops/isnan_native.h>
#include <ATen/ops/isnan_ops.h>
#include <ATen/ops/isnan_native.h>
#include <ATen/ops/isnan_ops.h>
#include <ATen/ops/kron_native.h>
#include <ATen/ops/kron_ops.h>
#include <ATen/ops/kron_native.h>
#include <ATen/ops/kron_ops.h>
#include <ATen/ops/kthvalue_native.h>
#include <ATen/ops/kthvalue_ops.h>
#include <ATen/ops/kthvalue_native.h>
#include <ATen/ops/kthvalue_ops.h>
#include <ATen/ops/kthvalue_native.h>
#include <ATen/ops/kthvalue_ops.h>
#include <ATen/ops/kthvalue_native.h>
#include <ATen/ops/kthvalue_ops.h>
#include <ATen/ops/native_layer_norm_native.h>
#include <ATen/ops/native_layer_norm_ops.h>
#include <ATen/ops/native_layer_norm_native.h>
#include <ATen/ops/native_layer_norm_ops.h>
#include <ATen/ops/native_layer_norm_backward_native.h>
#include <ATen/ops/native_layer_norm_backward_ops.h>
#include <ATen/ops/native_layer_norm_backward_native.h>
#include <ATen/ops/native_layer_norm_backward_ops.h>
#include <ATen/ops/nan_to_num_native.h>
#include <ATen/ops/nan_to_num_ops.h>
#include <ATen/ops/nan_to_num_native.h>
#include <ATen/ops/nan_to_num_ops.h>
#include <ATen/ops/nan_to_num_native.h>
#include <ATen/ops/nan_to_num_ops.h>
#include <ATen/ops/linear_native.h>
#include <ATen/ops/linear_ops.h>
#include <ATen/ops/linear_native.h>
#include <ATen/ops/linear_ops.h>
#include <ATen/ops/linear_backward_native.h>
#include <ATen/ops/linear_backward_ops.h>
#include <ATen/ops/linear_backward_native.h>
#include <ATen/ops/linear_backward_ops.h>
#include <ATen/ops/mkldnn_linear_native.h>
#include <ATen/ops/mkldnn_linear_ops.h>
#include <ATen/ops/mkldnn_linear_native.h>
#include <ATen/ops/mkldnn_linear_ops.h>
#include <ATen/ops/mkldnn_linear_backward_input_native.h>
#include <ATen/ops/mkldnn_linear_backward_input_ops.h>
#include <ATen/ops/mkldnn_linear_backward_input_native.h>
#include <ATen/ops/mkldnn_linear_backward_input_ops.h>
#include <ATen/ops/mkldnn_linear_backward_weights_native.h>
#include <ATen/ops/mkldnn_linear_backward_weights_ops.h>
#include <ATen/ops/mkldnn_linear_backward_weights_native.h>
#include <ATen/ops/mkldnn_linear_backward_weights_ops.h>
#include <ATen/ops/mkldnn_linear_backward_native.h>
#include <ATen/ops/mkldnn_linear_backward_ops.h>
#include <ATen/ops/mkldnn_linear_backward_native.h>
#include <ATen/ops/mkldnn_linear_backward_ops.h>
#include <ATen/ops/ldexp_native.h>
#include <ATen/ops/ldexp_ops.h>
#include <ATen/ops/ldexp_native.h>
#include <ATen/ops/ldexp_ops.h>
#include <ATen/ops/ldexp_native.h>
#include <ATen/ops/ldexp_ops.h>
#include <ATen/ops/linspace_native.h>
#include <ATen/ops/linspace_ops.h>
#include <ATen/ops/linspace_native.h>
#include <ATen/ops/linspace_ops.h>
#include <ATen/ops/linspace_native.h>
#include <ATen/ops/linspace_ops.h>
#include <ATen/ops/linspace_native.h>
#include <ATen/ops/linspace_ops.h>
#include <ATen/ops/linspace_native.h>
#include <ATen/ops/linspace_ops.h>
#include <ATen/ops/linspace_native.h>
#include <ATen/ops/linspace_ops.h>
#include <ATen/ops/linspace_native.h>
#include <ATen/ops/linspace_ops.h>
#include <ATen/ops/linspace_native.h>
#include <ATen/ops/linspace_ops.h>
#include <ATen/ops/log_native.h>
#include <ATen/ops/log_ops.h>
#include <ATen/ops/log_native.h>
#include <ATen/ops/log_ops.h>
#include <ATen/ops/log_native.h>
#include <ATen/ops/log_ops.h>
#include <ATen/ops/log10_native.h>
#include <ATen/ops/log10_ops.h>
#include <ATen/ops/log10_native.h>
#include <ATen/ops/log10_ops.h>
#include <ATen/ops/log10_native.h>
#include <ATen/ops/log10_ops.h>
#include <ATen/ops/log1p_native.h>
#include <ATen/ops/log1p_ops.h>
#include <ATen/ops/log1p_native.h>
#include <ATen/ops/log1p_ops.h>
#include <ATen/ops/log1p_native.h>
#include <ATen/ops/log1p_ops.h>
#include <ATen/ops/log2_native.h>
#include <ATen/ops/log2_ops.h>
#include <ATen/ops/log2_native.h>
#include <ATen/ops/log2_ops.h>
#include <ATen/ops/log2_native.h>
#include <ATen/ops/log2_ops.h>
#include <ATen/ops/logaddexp_native.h>
#include <ATen/ops/logaddexp_ops.h>
#include <ATen/ops/logaddexp_native.h>
#include <ATen/ops/logaddexp_ops.h>
#include <ATen/ops/logaddexp2_native.h>
#include <ATen/ops/logaddexp2_ops.h>
#include <ATen/ops/logaddexp2_native.h>
#include <ATen/ops/logaddexp2_ops.h>
#include <ATen/ops/xlogy_native.h>
#include <ATen/ops/xlogy_ops.h>
#include <ATen/ops/xlogy_native.h>
#include <ATen/ops/xlogy_ops.h>
#include <ATen/ops/xlogy_native.h>
#include <ATen/ops/xlogy_ops.h>
#include <ATen/ops/xlogy_native.h>
#include <ATen/ops/xlogy_ops.h>
#include <ATen/ops/xlogy_native.h>
#include <ATen/ops/xlogy_ops.h>
#include <ATen/ops/xlogy_native.h>
#include <ATen/ops/xlogy_ops.h>
#include <ATen/ops/xlogy_native.h>
#include <ATen/ops/xlogy_ops.h>
#include <ATen/ops/xlogy_native.h>
#include <ATen/ops/xlogy_ops.h>
#include <ATen/ops/logspace_native.h>
#include <ATen/ops/logspace_ops.h>
#include <ATen/ops/logspace_native.h>
#include <ATen/ops/logspace_ops.h>
#include <ATen/ops/logspace_native.h>
#include <ATen/ops/logspace_ops.h>
#include <ATen/ops/logspace_native.h>
#include <ATen/ops/logspace_ops.h>
#include <ATen/ops/logspace_native.h>
#include <ATen/ops/logspace_ops.h>
#include <ATen/ops/logspace_native.h>
#include <ATen/ops/logspace_ops.h>
#include <ATen/ops/logspace_native.h>
#include <ATen/ops/logspace_ops.h>
#include <ATen/ops/logspace_native.h>
#include <ATen/ops/logspace_ops.h>
#include <ATen/ops/log_softmax_native.h>
#include <ATen/ops/log_softmax_ops.h>
#include <ATen/ops/log_softmax_native.h>
#include <ATen/ops/log_softmax_ops.h>
#include <ATen/ops/_log_softmax_native.h>
#include <ATen/ops/_log_softmax_ops.h>
#include <ATen/ops/_log_softmax_native.h>
#include <ATen/ops/_log_softmax_ops.h>
#include <ATen/ops/_log_softmax_backward_data_native.h>
#include <ATen/ops/_log_softmax_backward_data_ops.h>
#include <ATen/ops/_log_softmax_backward_data_native.h>
#include <ATen/ops/_log_softmax_backward_data_ops.h>
#include <ATen/ops/_logcumsumexp_native.h>
#include <ATen/ops/_logcumsumexp_ops.h>
#include <ATen/ops/_logcumsumexp_native.h>
#include <ATen/ops/_logcumsumexp_ops.h>
#include <ATen/ops/logcumsumexp_native.h>
#include <ATen/ops/logcumsumexp_ops.h>
#include <ATen/ops/logcumsumexp_native.h>
#include <ATen/ops/logcumsumexp_ops.h>
#include <ATen/ops/logcumsumexp_native.h>
#include <ATen/ops/logcumsumexp_ops.h>
#include <ATen/ops/logcumsumexp_native.h>
#include <ATen/ops/logcumsumexp_ops.h>
#include <ATen/ops/logsumexp_native.h>
#include <ATen/ops/logsumexp_ops.h>
#include <ATen/ops/logsumexp_native.h>
#include <ATen/ops/logsumexp_ops.h>
#include <ATen/ops/logsumexp_native.h>
#include <ATen/ops/logsumexp_ops.h>
#include <ATen/ops/logsumexp_native.h>
#include <ATen/ops/logsumexp_ops.h>
#include <ATen/ops/matmul_native.h>
#include <ATen/ops/matmul_ops.h>
#include <ATen/ops/matmul_native.h>
#include <ATen/ops/matmul_ops.h>
#include <ATen/ops/matmul_backward_native.h>
#include <ATen/ops/matmul_backward_ops.h>
#include <ATen/ops/matmul_backward_native.h>
#include <ATen/ops/matmul_backward_ops.h>
#include <ATen/ops/matrix_power_native.h>
#include <ATen/ops/matrix_power_ops.h>
#include <ATen/ops/matrix_power_native.h>
#include <ATen/ops/matrix_power_ops.h>
#include <ATen/ops/_aminmax_native.h>
#include <ATen/ops/_aminmax_ops.h>
#include <ATen/ops/_aminmax_native.h>
#include <ATen/ops/_aminmax_ops.h>
#include <ATen/ops/_aminmax_native.h>
#include <ATen/ops/_aminmax_ops.h>
#include <ATen/ops/_aminmax_native.h>
#include <ATen/ops/_aminmax_ops.h>
#include <ATen/ops/aminmax_native.h>
#include <ATen/ops/aminmax_ops.h>
#include <ATen/ops/aminmax_native.h>
#include <ATen/ops/aminmax_ops.h>
#include <ATen/ops/_compute_linear_combination_native.h>
#include <ATen/ops/_compute_linear_combination_ops.h>
#include <ATen/ops/_compute_linear_combination_native.h>
#include <ATen/ops/_compute_linear_combination_ops.h>
#include <ATen/ops/max_native.h>
#include <ATen/ops/max_ops.h>
#include <ATen/ops/max_native.h>
#include <ATen/ops/max_ops.h>
#include <ATen/ops/max_native.h>
#include <ATen/ops/max_ops.h>
#include <ATen/ops/max_native.h>
#include <ATen/ops/max_ops.h>
#include <ATen/ops/amax_native.h>
#include <ATen/ops/amax_ops.h>
#include <ATen/ops/amax_native.h>
#include <ATen/ops/amax_ops.h>
#include <ATen/ops/max_pool2d_backward_native.h>
#include <ATen/ops/max_pool2d_backward_ops.h>
#include <ATen/ops/max_pool2d_backward_native.h>
#include <ATen/ops/max_pool2d_backward_ops.h>
#include <ATen/ops/mkldnn_max_pool2d_native.h>
#include <ATen/ops/mkldnn_max_pool2d_ops.h>
#include <ATen/ops/mkldnn_max_pool2d_native.h>
#include <ATen/ops/mkldnn_max_pool2d_ops.h>
#include <ATen/ops/mkldnn_max_pool2d_backward_native.h>
#include <ATen/ops/mkldnn_max_pool2d_backward_ops.h>
#include <ATen/ops/mkldnn_max_pool2d_backward_native.h>
#include <ATen/ops/mkldnn_max_pool2d_backward_ops.h>
#include <ATen/ops/mkldnn_max_pool3d_native.h>
#include <ATen/ops/mkldnn_max_pool3d_ops.h>
#include <ATen/ops/mkldnn_max_pool3d_native.h>
#include <ATen/ops/mkldnn_max_pool3d_ops.h>
#include <ATen/ops/mkldnn_max_pool3d_backward_native.h>
#include <ATen/ops/mkldnn_max_pool3d_backward_ops.h>
#include <ATen/ops/mkldnn_max_pool3d_backward_native.h>
#include <ATen/ops/mkldnn_max_pool3d_backward_ops.h>
#include <ATen/ops/quantized_max_pool1d_native.h>
#include <ATen/ops/quantized_max_pool1d_ops.h>
#include <ATen/ops/quantized_max_pool1d_native.h>
#include <ATen/ops/quantized_max_pool1d_ops.h>
#include <ATen/ops/quantized_max_pool2d_native.h>
#include <ATen/ops/quantized_max_pool2d_ops.h>
#include <ATen/ops/quantized_max_pool2d_native.h>
#include <ATen/ops/quantized_max_pool2d_ops.h>
#include <ATen/ops/quantized_max_pool3d_native.h>
#include <ATen/ops/quantized_max_pool3d_ops.h>
#include <ATen/ops/quantized_max_pool3d_native.h>
#include <ATen/ops/quantized_max_pool3d_ops.h>
#include <ATen/ops/mean_native.h>
#include <ATen/ops/mean_ops.h>
#include <ATen/ops/mean_native.h>
#include <ATen/ops/mean_ops.h>
#include <ATen/ops/mean_native.h>
#include <ATen/ops/mean_ops.h>
#include <ATen/ops/mean_native.h>
#include <ATen/ops/mean_ops.h>
#include <ATen/ops/mean_native.h>
#include <ATen/ops/mean_ops.h>
#include <ATen/ops/mean_native.h>
#include <ATen/ops/mean_ops.h>
#include <ATen/ops/nanmean_native.h>
#include <ATen/ops/nanmean_ops.h>
#include <ATen/ops/nanmean_native.h>
#include <ATen/ops/nanmean_ops.h>
#include <ATen/ops/median_native.h>
#include <ATen/ops/median_ops.h>
#include <ATen/ops/median_native.h>
#include <ATen/ops/median_ops.h>
#include <ATen/ops/median_native.h>
#include <ATen/ops/median_ops.h>
#include <ATen/ops/median_native.h>
#include <ATen/ops/median_ops.h>
#include <ATen/ops/median_native.h>
#include <ATen/ops/median_ops.h>
#include <ATen/ops/median_native.h>
#include <ATen/ops/median_ops.h>
#include <ATen/ops/nanmedian_native.h>
#include <ATen/ops/nanmedian_ops.h>
#include <ATen/ops/nanmedian_native.h>
#include <ATen/ops/nanmedian_ops.h>
#include <ATen/ops/nanmedian_native.h>
#include <ATen/ops/nanmedian_ops.h>
#include <ATen/ops/nanmedian_native.h>
#include <ATen/ops/nanmedian_ops.h>
#include <ATen/ops/nanmedian_native.h>
#include <ATen/ops/nanmedian_ops.h>
#include <ATen/ops/nanmedian_native.h>
#include <ATen/ops/nanmedian_ops.h>
#include <ATen/ops/min_native.h>
#include <ATen/ops/min_ops.h>
#include <ATen/ops/min_native.h>
#include <ATen/ops/min_ops.h>
#include <ATen/ops/min_native.h>
#include <ATen/ops/min_ops.h>
#include <ATen/ops/min_native.h>
#include <ATen/ops/min_ops.h>
#include <ATen/ops/amin_native.h>
#include <ATen/ops/amin_ops.h>
#include <ATen/ops/amin_native.h>
#include <ATen/ops/amin_ops.h>
#include <ATen/ops/_mps_convolution_native.h>
#include <ATen/ops/_mps_convolution_ops.h>
#include <ATen/ops/_mps_convolution_native.h>
#include <ATen/ops/_mps_convolution_ops.h>
#include <ATen/ops/mps_convolution_backward_native.h>
#include <ATen/ops/mps_convolution_backward_ops.h>
#include <ATen/ops/mps_convolution_backward_native.h>
#include <ATen/ops/mps_convolution_backward_ops.h>
#include <ATen/ops/mkldnn_convolution_native.h>
#include <ATen/ops/mkldnn_convolution_ops.h>
#include <ATen/ops/mkldnn_convolution_native.h>
#include <ATen/ops/mkldnn_convolution_ops.h>
#include <ATen/ops/mkldnn_rnn_layer_native.h>
#include <ATen/ops/mkldnn_rnn_layer_ops.h>
#include <ATen/ops/mkldnn_rnn_layer_native.h>
#include <ATen/ops/mkldnn_rnn_layer_ops.h>
#include <ATen/ops/mkldnn_rnn_layer_backward_native.h>
#include <ATen/ops/mkldnn_rnn_layer_backward_ops.h>
#include <ATen/ops/mkldnn_rnn_layer_backward_native.h>
#include <ATen/ops/mkldnn_rnn_layer_backward_ops.h>
#include <ATen/ops/miopen_batch_norm_native.h>
#include <ATen/ops/miopen_batch_norm_ops.h>
#include <ATen/ops/miopen_batch_norm_native.h>
#include <ATen/ops/miopen_batch_norm_ops.h>
#include <ATen/ops/miopen_batch_norm_backward_native.h>
#include <ATen/ops/miopen_batch_norm_backward_ops.h>
#include <ATen/ops/miopen_batch_norm_backward_native.h>
#include <ATen/ops/miopen_batch_norm_backward_ops.h>
#include <ATen/ops/miopen_convolution_native.h>
#include <ATen/ops/miopen_convolution_ops.h>
#include <ATen/ops/miopen_convolution_native.h>
#include <ATen/ops/miopen_convolution_ops.h>
#include <ATen/ops/miopen_convolution_transpose_native.h>
#include <ATen/ops/miopen_convolution_transpose_ops.h>
#include <ATen/ops/miopen_convolution_transpose_native.h>
#include <ATen/ops/miopen_convolution_transpose_ops.h>
#include <ATen/ops/miopen_depthwise_convolution_native.h>
#include <ATen/ops/miopen_depthwise_convolution_ops.h>
#include <ATen/ops/miopen_depthwise_convolution_native.h>
#include <ATen/ops/miopen_depthwise_convolution_ops.h>
#include <ATen/ops/miopen_rnn_native.h>
#include <ATen/ops/miopen_rnn_ops.h>
#include <ATen/ops/miopen_rnn_native.h>
#include <ATen/ops/miopen_rnn_ops.h>
#include <ATen/ops/miopen_rnn_backward_native.h>
#include <ATen/ops/miopen_rnn_backward_ops.h>
#include <ATen/ops/miopen_rnn_backward_native.h>
#include <ATen/ops/miopen_rnn_backward_ops.h>
#include <ATen/ops/mm_native.h>
#include <ATen/ops/mm_ops.h>
#include <ATen/ops/mm_native.h>
#include <ATen/ops/mm_ops.h>
#include <ATen/ops/_int_mm_native.h>
#include <ATen/ops/_int_mm_ops.h>
#include <ATen/ops/_int_mm_native.h>
#include <ATen/ops/_int_mm_ops.h>
#include <ATen/ops/_sparse_sparse_matmul_native.h>
#include <ATen/ops/_sparse_sparse_matmul_ops.h>
#include <ATen/ops/_sparse_sparse_matmul_native.h>
#include <ATen/ops/_sparse_sparse_matmul_ops.h>
#include <ATen/ops/mode_native.h>
#include <ATen/ops/mode_ops.h>
#include <ATen/ops/mode_native.h>
#include <ATen/ops/mode_ops.h>
#include <ATen/ops/mode_native.h>
#include <ATen/ops/mode_ops.h>
#include <ATen/ops/mode_native.h>
#include <ATen/ops/mode_ops.h>
#include <ATen/ops/mul_native.h>
#include <ATen/ops/mul_ops.h>
#include <ATen/ops/mul_native.h>
#include <ATen/ops/mul_ops.h>
#include <ATen/ops/mul_native.h>
#include <ATen/ops/mul_ops.h>
#include <ATen/ops/mul_native.h>
#include <ATen/ops/mul_ops.h>
#include <ATen/ops/mul_native.h>
#include <ATen/ops/mul_ops.h>
#include <ATen/ops/mul_native.h>
#include <ATen/ops/mul_ops.h>
#include <ATen/ops/multiply_native.h>
#include <ATen/ops/multiply_ops.h>
#include <ATen/ops/multiply_native.h>
#include <ATen/ops/multiply_ops.h>
#include <ATen/ops/multiply_native.h>
#include <ATen/ops/multiply_ops.h>
#include <ATen/ops/mv_native.h>
#include <ATen/ops/mv_ops.h>
#include <ATen/ops/mv_native.h>
#include <ATen/ops/mv_ops.h>
#include <ATen/ops/mvlgamma_native.h>
#include <ATen/ops/mvlgamma_ops.h>
#include <ATen/ops/mvlgamma_native.h>
#include <ATen/ops/mvlgamma_ops.h>
#include <ATen/ops/mvlgamma_native.h>
#include <ATen/ops/mvlgamma_ops.h>
#include <ATen/ops/narrow_copy_native.h>
#include <ATen/ops/narrow_copy_ops.h>
#include <ATen/ops/narrow_copy_native.h>
#include <ATen/ops/narrow_copy_ops.h>
#include <ATen/ops/native_batch_norm_native.h>
#include <ATen/ops/native_batch_norm_ops.h>
#include <ATen/ops/native_batch_norm_native.h>
#include <ATen/ops/native_batch_norm_ops.h>
#include <ATen/ops/_native_batch_norm_legit_native.h>
#include <ATen/ops/_native_batch_norm_legit_ops.h>
#include <ATen/ops/_native_batch_norm_legit_native.h>
#include <ATen/ops/_native_batch_norm_legit_ops.h>
#include <ATen/ops/_native_batch_norm_legit_native.h>
#include <ATen/ops/_native_batch_norm_legit_ops.h>
#include <ATen/ops/_native_batch_norm_legit_no_training_native.h>
#include <ATen/ops/_native_batch_norm_legit_no_training_ops.h>
#include <ATen/ops/_native_batch_norm_legit_no_training_native.h>
#include <ATen/ops/_native_batch_norm_legit_no_training_ops.h>
#include <ATen/ops/_native_batch_norm_legit_native.h>
#include <ATen/ops/_native_batch_norm_legit_ops.h>
#include <ATen/ops/_native_batch_norm_legit_native.h>
#include <ATen/ops/_native_batch_norm_legit_ops.h>
#include <ATen/ops/batch_norm_stats_native.h>
#include <ATen/ops/batch_norm_stats_ops.h>
#include <ATen/ops/batch_norm_stats_native.h>
#include <ATen/ops/batch_norm_stats_ops.h>
#include <ATen/ops/batch_norm_elemt_native.h>
#include <ATen/ops/batch_norm_elemt_ops.h>
#include <ATen/ops/batch_norm_elemt_native.h>
#include <ATen/ops/batch_norm_elemt_ops.h>
#include <ATen/ops/batch_norm_gather_stats_native.h>
#include <ATen/ops/batch_norm_gather_stats_ops.h>
#include <ATen/ops/batch_norm_gather_stats_native.h>
#include <ATen/ops/batch_norm_gather_stats_ops.h>
#include <ATen/ops/batch_norm_gather_stats_with_counts_native.h>
#include <ATen/ops/batch_norm_gather_stats_with_counts_ops.h>
#include <ATen/ops/batch_norm_gather_stats_with_counts_native.h>
#include <ATen/ops/batch_norm_gather_stats_with_counts_ops.h>
#include <ATen/ops/native_batch_norm_backward_native.h>
#include <ATen/ops/native_batch_norm_backward_ops.h>
#include <ATen/ops/native_batch_norm_backward_native.h>
#include <ATen/ops/native_batch_norm_backward_ops.h>
#include <ATen/ops/batch_norm_backward_reduce_native.h>
#include <ATen/ops/batch_norm_backward_reduce_ops.h>
#include <ATen/ops/batch_norm_backward_reduce_native.h>
#include <ATen/ops/batch_norm_backward_reduce_ops.h>
#include <ATen/ops/batch_norm_backward_elemt_native.h>
#include <ATen/ops/batch_norm_backward_elemt_ops.h>
#include <ATen/ops/batch_norm_backward_elemt_native.h>
#include <ATen/ops/batch_norm_backward_elemt_ops.h>
#include <ATen/ops/batch_norm_update_stats_native.h>
#include <ATen/ops/batch_norm_update_stats_ops.h>
#include <ATen/ops/batch_norm_update_stats_native.h>
#include <ATen/ops/batch_norm_update_stats_ops.h>
#include <ATen/ops/_nnpack_spatial_convolution_native.h>
#include <ATen/ops/_nnpack_spatial_convolution_ops.h>
#include <ATen/ops/_nnpack_spatial_convolution_native.h>
#include <ATen/ops/_nnpack_spatial_convolution_ops.h>
#include <ATen/ops/ones_native.h>
#include <ATen/ops/ones_ops.h>
#include <ATen/ops/ones_native.h>
#include <ATen/ops/ones_ops.h>
#include <ATen/ops/ones_native.h>
#include <ATen/ops/ones_ops.h>
#include <ATen/ops/ones_native.h>
#include <ATen/ops/ones_ops.h>
#include <ATen/ops/ones_like_native.h>
#include <ATen/ops/ones_like_ops.h>
#include <ATen/ops/ones_like_native.h>
#include <ATen/ops/ones_like_ops.h>
#include <ATen/ops/_euclidean_dist_native.h>
#include <ATen/ops/_euclidean_dist_ops.h>
#include <ATen/ops/_euclidean_dist_native.h>
#include <ATen/ops/_euclidean_dist_ops.h>
#include <ATen/ops/_cdist_forward_native.h>
#include <ATen/ops/_cdist_forward_ops.h>
#include <ATen/ops/_cdist_forward_native.h>
#include <ATen/ops/_cdist_forward_ops.h>
#include <ATen/ops/_cdist_backward_native.h>
#include <ATen/ops/_cdist_backward_ops.h>
#include <ATen/ops/_cdist_backward_native.h>
#include <ATen/ops/_cdist_backward_ops.h>
#include <ATen/ops/_pdist_forward_native.h>
#include <ATen/ops/_pdist_forward_ops.h>
#include <ATen/ops/_pdist_forward_native.h>
#include <ATen/ops/_pdist_forward_ops.h>
#include <ATen/ops/_pdist_backward_native.h>
#include <ATen/ops/_pdist_backward_ops.h>
#include <ATen/ops/_pdist_backward_native.h>
#include <ATen/ops/_pdist_backward_ops.h>
#include <ATen/ops/pixel_shuffle_native.h>
#include <ATen/ops/pixel_shuffle_ops.h>
#include <ATen/ops/pixel_shuffle_native.h>
#include <ATen/ops/pixel_shuffle_ops.h>
#include <ATen/ops/pixel_unshuffle_native.h>
#include <ATen/ops/pixel_unshuffle_ops.h>
#include <ATen/ops/pixel_unshuffle_native.h>
#include <ATen/ops/pixel_unshuffle_ops.h>
#include <ATen/ops/channel_shuffle_native.h>
#include <ATen/ops/channel_shuffle_ops.h>
#include <ATen/ops/channel_shuffle_native.h>
#include <ATen/ops/channel_shuffle_ops.h>
#include <ATen/ops/_pin_memory_native.h>
#include <ATen/ops/_pin_memory_ops.h>
#include <ATen/ops/_pin_memory_native.h>
#include <ATen/ops/_pin_memory_ops.h>
#include <ATen/ops/rad2deg_native.h>
#include <ATen/ops/rad2deg_ops.h>
#include <ATen/ops/rad2deg_native.h>
#include <ATen/ops/rad2deg_ops.h>
#include <ATen/ops/rad2deg_native.h>
#include <ATen/ops/rad2deg_ops.h>
#include <ATen/ops/deg2rad_native.h>
#include <ATen/ops/deg2rad_ops.h>
#include <ATen/ops/deg2rad_native.h>
#include <ATen/ops/deg2rad_ops.h>
#include <ATen/ops/deg2rad_native.h>
#include <ATen/ops/deg2rad_ops.h>
#include <ATen/ops/scalar_tensor_native.h>
#include <ATen/ops/scalar_tensor_ops.h>
#include <ATen/ops/scalar_tensor_native.h>
#include <ATen/ops/scalar_tensor_ops.h>
#include <ATen/ops/rand_native.h>
#include <ATen/ops/rand_ops.h>
#include <ATen/ops/rand_native.h>
#include <ATen/ops/rand_ops.h>
#include <ATen/ops/rand_native.h>
#include <ATen/ops/rand_ops.h>
#include <ATen/ops/rand_native.h>
#include <ATen/ops/rand_ops.h>
#include <ATen/ops/rand_native.h>
#include <ATen/ops/rand_ops.h>
#include <ATen/ops/rand_native.h>
#include <ATen/ops/rand_ops.h>
#include <ATen/ops/rand_native.h>
#include <ATen/ops/rand_ops.h>
#include <ATen/ops/rand_native.h>
#include <ATen/ops/rand_ops.h>
#include <ATen/ops/rand_like_native.h>
#include <ATen/ops/rand_like_ops.h>
#include <ATen/ops/rand_like_native.h>
#include <ATen/ops/rand_like_ops.h>
#include <ATen/ops/randint_native.h>
#include <ATen/ops/randint_ops.h>
#include <ATen/ops/randint_native.h>
#include <ATen/ops/randint_ops.h>
#include <ATen/ops/randint_native.h>
#include <ATen/ops/randint_ops.h>
#include <ATen/ops/randint_native.h>
#include <ATen/ops/randint_ops.h>
#include <ATen/ops/randint_native.h>
#include <ATen/ops/randint_ops.h>
#include <ATen/ops/randint_native.h>
#include <ATen/ops/randint_ops.h>
#include <ATen/ops/randint_native.h>
#include <ATen/ops/randint_ops.h>
#include <ATen/ops/randint_native.h>
#include <ATen/ops/randint_ops.h>
#include <ATen/ops/randint_like_native.h>
#include <ATen/ops/randint_like_ops.h>
#include <ATen/ops/randint_like_native.h>
#include <ATen/ops/randint_like_ops.h>
#include <ATen/ops/randint_like_native.h>
#include <ATen/ops/randint_like_ops.h>
#include <ATen/ops/randint_like_native.h>
#include <ATen/ops/randint_like_ops.h>
#include <ATen/ops/randn_native.h>
#include <ATen/ops/randn_ops.h>
#include <ATen/ops/randn_native.h>
#include <ATen/ops/randn_ops.h>
#include <ATen/ops/randn_native.h>
#include <ATen/ops/randn_ops.h>
#include <ATen/ops/randn_native.h>
#include <ATen/ops/randn_ops.h>
#include <ATen/ops/randn_native.h>
#include <ATen/ops/randn_ops.h>
#include <ATen/ops/randn_native.h>
#include <ATen/ops/randn_ops.h>
#include <ATen/ops/randn_native.h>
#include <ATen/ops/randn_ops.h>
#include <ATen/ops/randn_native.h>
#include <ATen/ops/randn_ops.h>
#include <ATen/ops/randn_like_native.h>
#include <ATen/ops/randn_like_ops.h>
#include <ATen/ops/randn_like_native.h>
#include <ATen/ops/randn_like_ops.h>
#include <ATen/ops/randperm_native.h>
#include <ATen/ops/randperm_ops.h>
#include <ATen/ops/randperm_native.h>
#include <ATen/ops/randperm_ops.h>
#include <ATen/ops/randperm_native.h>
#include <ATen/ops/randperm_ops.h>
#include <ATen/ops/randperm_native.h>
#include <ATen/ops/randperm_ops.h>
#include <ATen/ops/range_native.h>
#include <ATen/ops/range_ops.h>
#include <ATen/ops/range_native.h>
#include <ATen/ops/range_ops.h>
#include <ATen/ops/range_native.h>
#include <ATen/ops/range_ops.h>
#include <ATen/ops/range_native.h>
#include <ATen/ops/range_ops.h>
#include <ATen/ops/reciprocal_native.h>
#include <ATen/ops/reciprocal_ops.h>
#include <ATen/ops/reciprocal_native.h>
#include <ATen/ops/reciprocal_ops.h>
#include <ATen/ops/reciprocal_native.h>
#include <ATen/ops/reciprocal_ops.h>
#include <ATen/ops/neg_native.h>
#include <ATen/ops/neg_ops.h>
#include <ATen/ops/neg_native.h>
#include <ATen/ops/neg_ops.h>
#include <ATen/ops/neg_native.h>
#include <ATen/ops/neg_ops.h>
#include <ATen/ops/negative_native.h>
#include <ATen/ops/negative_ops.h>
#include <ATen/ops/negative_native.h>
#include <ATen/ops/negative_ops.h>
#include <ATen/ops/negative_native.h>
#include <ATen/ops/negative_ops.h>
#include <ATen/ops/repeat_native.h>
#include <ATen/ops/repeat_ops.h>
#include <ATen/ops/repeat_native.h>
#include <ATen/ops/repeat_ops.h>
#include <ATen/ops/repeat_interleave_native.h>
#include <ATen/ops/repeat_interleave_ops.h>
#include <ATen/ops/repeat_interleave_native.h>
#include <ATen/ops/repeat_interleave_ops.h>
#include <ATen/ops/_mkldnn_reshape_native.h>
#include <ATen/ops/_mkldnn_reshape_ops.h>
#include <ATen/ops/_mkldnn_reshape_native.h>
#include <ATen/ops/_mkldnn_reshape_ops.h>
#include <ATen/ops/round_native.h>
#include <ATen/ops/round_ops.h>
#include <ATen/ops/round_native.h>
#include <ATen/ops/round_ops.h>
#include <ATen/ops/round_native.h>
#include <ATen/ops/round_ops.h>
#include <ATen/ops/round_native.h>
#include <ATen/ops/round_ops.h>
#include <ATen/ops/round_native.h>
#include <ATen/ops/round_ops.h>
#include <ATen/ops/round_native.h>
#include <ATen/ops/round_ops.h>
#include <ATen/ops/relu_native.h>
#include <ATen/ops/relu_ops.h>
#include <ATen/ops/relu_native.h>
#include <ATen/ops/relu_ops.h>
#include <ATen/ops/relu_native.h>
#include <ATen/ops/relu_ops.h>
#include <ATen/ops/gelu_native.h>
#include <ATen/ops/gelu_ops.h>
#include <ATen/ops/gelu_native.h>
#include <ATen/ops/gelu_ops.h>
#include <ATen/ops/gelu_native.h>
#include <ATen/ops/gelu_ops.h>
#include <ATen/ops/gelu_backward_native.h>
#include <ATen/ops/gelu_backward_ops.h>
#include <ATen/ops/gelu_backward_native.h>
#include <ATen/ops/gelu_backward_ops.h>
#include <ATen/ops/hardshrink_native.h>
#include <ATen/ops/hardshrink_ops.h>
#include <ATen/ops/hardshrink_native.h>
#include <ATen/ops/hardshrink_ops.h>
#include <ATen/ops/hardshrink_backward_native.h>
#include <ATen/ops/hardshrink_backward_ops.h>
#include <ATen/ops/hardshrink_backward_native.h>
#include <ATen/ops/hardshrink_backward_ops.h>
#include <ATen/ops/rsqrt_native.h>
#include <ATen/ops/rsqrt_ops.h>
#include <ATen/ops/rsqrt_native.h>
#include <ATen/ops/rsqrt_ops.h>
#include <ATen/ops/rsqrt_native.h>
#include <ATen/ops/rsqrt_ops.h>
#include <ATen/ops/select_backward_native.h>
#include <ATen/ops/select_backward_ops.h>
#include <ATen/ops/select_backward_native.h>
#include <ATen/ops/select_backward_ops.h>
#include <ATen/ops/celu_native.h>
#include <ATen/ops/celu_ops.h>
#include <ATen/ops/celu_native.h>
#include <ATen/ops/celu_ops.h>
#include <ATen/ops/celu_native.h>
#include <ATen/ops/celu_ops.h>
#include <ATen/ops/silu_native.h>
#include <ATen/ops/silu_ops.h>
#include <ATen/ops/silu_native.h>
#include <ATen/ops/silu_ops.h>
#include <ATen/ops/silu_native.h>
#include <ATen/ops/silu_ops.h>
#include <ATen/ops/silu_backward_native.h>
#include <ATen/ops/silu_backward_ops.h>
#include <ATen/ops/silu_backward_native.h>
#include <ATen/ops/silu_backward_ops.h>
#include <ATen/ops/mish_native.h>
#include <ATen/ops/mish_ops.h>
#include <ATen/ops/mish_native.h>
#include <ATen/ops/mish_ops.h>
#include <ATen/ops/mish_native.h>
#include <ATen/ops/mish_ops.h>
#include <ATen/ops/sigmoid_native.h>
#include <ATen/ops/sigmoid_ops.h>
#include <ATen/ops/sigmoid_native.h>
#include <ATen/ops/sigmoid_ops.h>
#include <ATen/ops/sigmoid_native.h>
#include <ATen/ops/sigmoid_ops.h>
#include <ATen/ops/logit_native.h>
#include <ATen/ops/logit_ops.h>
#include <ATen/ops/logit_native.h>
#include <ATen/ops/logit_ops.h>
#include <ATen/ops/logit_native.h>
#include <ATen/ops/logit_ops.h>
#include <ATen/ops/sin_native.h>
#include <ATen/ops/sin_ops.h>
#include <ATen/ops/sin_native.h>
#include <ATen/ops/sin_ops.h>
#include <ATen/ops/sin_native.h>
#include <ATen/ops/sin_ops.h>
#include <ATen/ops/sinc_native.h>
#include <ATen/ops/sinc_ops.h>
#include <ATen/ops/sinc_native.h>
#include <ATen/ops/sinc_ops.h>
#include <ATen/ops/sinc_native.h>
#include <ATen/ops/sinc_ops.h>
#include <ATen/ops/sinh_native.h>
#include <ATen/ops/sinh_ops.h>
#include <ATen/ops/sinh_native.h>
#include <ATen/ops/sinh_ops.h>
#include <ATen/ops/sinh_native.h>
#include <ATen/ops/sinh_ops.h>
#include <ATen/ops/slice_backward_native.h>
#include <ATen/ops/slice_backward_ops.h>
#include <ATen/ops/slice_backward_native.h>
#include <ATen/ops/slice_backward_ops.h>
#include <ATen/ops/slice_scatter_native.h>
#include <ATen/ops/slice_scatter_ops.h>
#include <ATen/ops/slice_scatter_native.h>
#include <ATen/ops/slice_scatter_ops.h>
#include <ATen/ops/select_scatter_native.h>
#include <ATen/ops/select_scatter_ops.h>
#include <ATen/ops/select_scatter_native.h>
#include <ATen/ops/select_scatter_ops.h>
#include <ATen/ops/diagonal_scatter_native.h>
#include <ATen/ops/diagonal_scatter_ops.h>
#include <ATen/ops/diagonal_scatter_native.h>
#include <ATen/ops/diagonal_scatter_ops.h>
#include <ATen/ops/as_strided_scatter_native.h>
#include <ATen/ops/as_strided_scatter_ops.h>
#include <ATen/ops/as_strided_scatter_native.h>
#include <ATen/ops/as_strided_scatter_ops.h>
#include <ATen/ops/softmax_native.h>
#include <ATen/ops/softmax_ops.h>
#include <ATen/ops/softmax_native.h>
#include <ATen/ops/softmax_ops.h>
#include <ATen/ops/_softmax_native.h>
#include <ATen/ops/_softmax_ops.h>
#include <ATen/ops/_softmax_native.h>
#include <ATen/ops/_softmax_ops.h>
#include <ATen/ops/_softmax_backward_data_native.h>
#include <ATen/ops/_softmax_backward_data_ops.h>
#include <ATen/ops/_softmax_backward_data_native.h>
#include <ATen/ops/_softmax_backward_data_ops.h>
#include <ATen/ops/unsafe_split_native.h>
#include <ATen/ops/unsafe_split_ops.h>
#include <ATen/ops/unsafe_split_native.h>
#include <ATen/ops/unsafe_split_ops.h>
#include <ATen/ops/unsafe_split_with_sizes_native.h>
#include <ATen/ops/unsafe_split_with_sizes_ops.h>
#include <ATen/ops/unsafe_split_with_sizes_native.h>
#include <ATen/ops/unsafe_split_with_sizes_ops.h>
#include <ATen/ops/sspaddmm_native.h>
#include <ATen/ops/sspaddmm_ops.h>
#include <ATen/ops/sspaddmm_native.h>
#include <ATen/ops/sspaddmm_ops.h>
#include <ATen/ops/_chunk_cat_native.h>
#include <ATen/ops/_chunk_cat_ops.h>
#include <ATen/ops/_chunk_cat_native.h>
#include <ATen/ops/_chunk_cat_ops.h>
#include <ATen/ops/stack_native.h>
#include <ATen/ops/stack_ops.h>
#include <ATen/ops/stack_native.h>
#include <ATen/ops/stack_ops.h>
#include <ATen/ops/_stack_native.h>
#include <ATen/ops/_stack_ops.h>
#include <ATen/ops/_stack_native.h>
#include <ATen/ops/_stack_ops.h>
#include <ATen/ops/hstack_native.h>
#include <ATen/ops/hstack_ops.h>
#include <ATen/ops/hstack_native.h>
#include <ATen/ops/hstack_ops.h>
#include <ATen/ops/vstack_native.h>
#include <ATen/ops/vstack_ops.h>
#include <ATen/ops/vstack_native.h>
#include <ATen/ops/vstack_ops.h>
#include <ATen/ops/dstack_native.h>
#include <ATen/ops/dstack_ops.h>
#include <ATen/ops/dstack_native.h>
#include <ATen/ops/dstack_ops.h>
#include <ATen/ops/sum_native.h>
#include <ATen/ops/sum_ops.h>
#include <ATen/ops/sum_native.h>
#include <ATen/ops/sum_ops.h>
#include <ATen/ops/sum_native.h>
#include <ATen/ops/sum_ops.h>
#include <ATen/ops/sum_native.h>
#include <ATen/ops/sum_ops.h>
#include <ATen/ops/sum_native.h>
#include <ATen/ops/sum_ops.h>
#include <ATen/ops/sum_native.h>
#include <ATen/ops/sum_ops.h>
#include <ATen/ops/nansum_native.h>
#include <ATen/ops/nansum_ops.h>
#include <ATen/ops/nansum_native.h>
#include <ATen/ops/nansum_ops.h>
#include <ATen/ops/sqrt_native.h>
#include <ATen/ops/sqrt_ops.h>
#include <ATen/ops/sqrt_native.h>
#include <ATen/ops/sqrt_ops.h>
#include <ATen/ops/sqrt_native.h>
#include <ATen/ops/sqrt_ops.h>
#include <ATen/ops/square_native.h>
#include <ATen/ops/square_ops.h>
#include <ATen/ops/square_native.h>
#include <ATen/ops/square_ops.h>
#include <ATen/ops/square_native.h>
#include <ATen/ops/square_ops.h>
#include <ATen/ops/std_native.h>
#include <ATen/ops/std_ops.h>
#include <ATen/ops/std_native.h>
#include <ATen/ops/std_ops.h>
#include <ATen/ops/std_native.h>
#include <ATen/ops/std_ops.h>
#include <ATen/ops/std_native.h>
#include <ATen/ops/std_ops.h>
#include <ATen/ops/std_mean_native.h>
#include <ATen/ops/std_mean_ops.h>
#include <ATen/ops/std_mean_native.h>
#include <ATen/ops/std_mean_ops.h>
#include <ATen/ops/std_native.h>
#include <ATen/ops/std_ops.h>
#include <ATen/ops/std_native.h>
#include <ATen/ops/std_ops.h>
#include <ATen/ops/std_native.h>
#include <ATen/ops/std_ops.h>
#include <ATen/ops/std_native.h>
#include <ATen/ops/std_ops.h>
#include <ATen/ops/prod_native.h>
#include <ATen/ops/prod_ops.h>
#include <ATen/ops/prod_native.h>
#include <ATen/ops/prod_ops.h>
#include <ATen/ops/prod_native.h>
#include <ATen/ops/prod_ops.h>
#include <ATen/ops/prod_native.h>
#include <ATen/ops/prod_ops.h>
#include <ATen/ops/prod_native.h>
#include <ATen/ops/prod_ops.h>
#include <ATen/ops/prod_native.h>
#include <ATen/ops/prod_ops.h>
#include <ATen/ops/tan_native.h>
#include <ATen/ops/tan_ops.h>
#include <ATen/ops/tan_native.h>
#include <ATen/ops/tan_ops.h>
#include <ATen/ops/tan_native.h>
#include <ATen/ops/tan_ops.h>
#include <ATen/ops/tanh_native.h>
#include <ATen/ops/tanh_ops.h>
#include <ATen/ops/tanh_native.h>
#include <ATen/ops/tanh_ops.h>
#include <ATen/ops/tanh_native.h>
#include <ATen/ops/tanh_ops.h>
#include <ATen/ops/tensordot_native.h>
#include <ATen/ops/tensordot_ops.h>
#include <ATen/ops/tensordot_native.h>
#include <ATen/ops/tensordot_ops.h>
#include <ATen/ops/threshold_native.h>
#include <ATen/ops/threshold_ops.h>
#include <ATen/ops/threshold_native.h>
#include <ATen/ops/threshold_ops.h>
#include <ATen/ops/threshold_native.h>
#include <ATen/ops/threshold_ops.h>
#include <ATen/ops/threshold_backward_native.h>
#include <ATen/ops/threshold_backward_ops.h>
#include <ATen/ops/threshold_backward_native.h>
#include <ATen/ops/threshold_backward_ops.h>
#include <ATen/ops/_mkldnn_transpose_native.h>
#include <ATen/ops/_mkldnn_transpose_ops.h>
#include <ATen/ops/_mkldnn_transpose_native.h>
#include <ATen/ops/_mkldnn_transpose_ops.h>
#include <ATen/ops/_mkldnn_transpose_native.h>
#include <ATen/ops/_mkldnn_transpose_ops.h>
#include <ATen/ops/flip_native.h>
#include <ATen/ops/flip_ops.h>
#include <ATen/ops/flip_native.h>
#include <ATen/ops/flip_ops.h>
#include <ATen/ops/roll_native.h>
#include <ATen/ops/roll_ops.h>
#include <ATen/ops/roll_native.h>
#include <ATen/ops/roll_ops.h>
#include <ATen/ops/rot90_native.h>
#include <ATen/ops/rot90_ops.h>
#include <ATen/ops/rot90_native.h>
#include <ATen/ops/rot90_ops.h>
#include <ATen/ops/_transform_bias_rescale_qkv_native.h>
#include <ATen/ops/_transform_bias_rescale_qkv_ops.h>
#include <ATen/ops/_transform_bias_rescale_qkv_native.h>
#include <ATen/ops/_transform_bias_rescale_qkv_ops.h>
#include <ATen/ops/_nested_tensor_from_mask_native.h>
#include <ATen/ops/_nested_tensor_from_mask_ops.h>
#include <ATen/ops/_nested_tensor_from_mask_native.h>
#include <ATen/ops/_nested_tensor_from_mask_ops.h>
#include <ATen/ops/_nested_from_padded_native.h>
#include <ATen/ops/_nested_from_padded_ops.h>
#include <ATen/ops/_nested_from_padded_native.h>
#include <ATen/ops/_nested_from_padded_ops.h>
#include <ATen/ops/_nested_tensor_size_native.h>
#include <ATen/ops/_nested_tensor_size_ops.h>
#include <ATen/ops/_nested_tensor_size_native.h>
#include <ATen/ops/_nested_tensor_size_ops.h>
#include <ATen/ops/_nested_tensor_strides_native.h>
#include <ATen/ops/_nested_tensor_strides_ops.h>
#include <ATen/ops/_nested_tensor_strides_native.h>
#include <ATen/ops/_nested_tensor_strides_ops.h>
#include <ATen/ops/_nested_tensor_storage_offsets_native.h>
#include <ATen/ops/_nested_tensor_storage_offsets_ops.h>
#include <ATen/ops/_nested_tensor_storage_offsets_native.h>
#include <ATen/ops/_nested_tensor_storage_offsets_ops.h>
#include <ATen/ops/_nested_from_padded_and_nested_example_native.h>
#include <ATen/ops/_nested_from_padded_and_nested_example_ops.h>
#include <ATen/ops/_nested_from_padded_and_nested_example_native.h>
#include <ATen/ops/_nested_from_padded_and_nested_example_ops.h>
#include <ATen/ops/_nested_view_from_buffer_copy_native.h>
#include <ATen/ops/_nested_view_from_buffer_copy_ops.h>
#include <ATen/ops/_nested_view_from_buffer_copy_native.h>
#include <ATen/ops/_nested_view_from_buffer_copy_ops.h>
#include <ATen/ops/_nested_view_from_jagged_copy_native.h>
#include <ATen/ops/_nested_view_from_jagged_copy_ops.h>
#include <ATen/ops/_nested_view_from_jagged_copy_native.h>
#include <ATen/ops/_nested_view_from_jagged_copy_ops.h>
#include <ATen/ops/_nested_get_values_copy_native.h>
#include <ATen/ops/_nested_get_values_copy_ops.h>
#include <ATen/ops/_nested_get_values_copy_native.h>
#include <ATen/ops/_nested_get_values_copy_ops.h>
#include <ATen/ops/_trilinear_native.h>
#include <ATen/ops/_trilinear_ops.h>
#include <ATen/ops/_trilinear_native.h>
#include <ATen/ops/_trilinear_ops.h>
#include <ATen/ops/trunc_native.h>
#include <ATen/ops/trunc_ops.h>
#include <ATen/ops/trunc_native.h>
#include <ATen/ops/trunc_ops.h>
#include <ATen/ops/trunc_native.h>
#include <ATen/ops/trunc_ops.h>
#include <ATen/ops/fix_native.h>
#include <ATen/ops/fix_ops.h>
#include <ATen/ops/fix_native.h>
#include <ATen/ops/fix_ops.h>
#include <ATen/ops/fix_native.h>
#include <ATen/ops/fix_ops.h>
#include <ATen/ops/_unique_native.h>
#include <ATen/ops/_unique_ops.h>
#include <ATen/ops/_unique_native.h>
#include <ATen/ops/_unique_ops.h>
#include <ATen/ops/unique_dim_native.h>
#include <ATen/ops/unique_dim_ops.h>
#include <ATen/ops/unique_dim_native.h>
#include <ATen/ops/unique_dim_ops.h>
#include <ATen/ops/unique_consecutive_native.h>
#include <ATen/ops/unique_consecutive_ops.h>
#include <ATen/ops/unique_consecutive_native.h>
#include <ATen/ops/unique_consecutive_ops.h>
#include <ATen/ops/unique_dim_consecutive_native.h>
#include <ATen/ops/unique_dim_consecutive_ops.h>
#include <ATen/ops/unique_dim_consecutive_native.h>
#include <ATen/ops/unique_dim_consecutive_ops.h>
#include <ATen/ops/_unique2_native.h>
#include <ATen/ops/_unique2_ops.h>
#include <ATen/ops/_unique2_native.h>
#include <ATen/ops/_unique2_ops.h>
#include <ATen/ops/_unsafe_view_native.h>
#include <ATen/ops/_unsafe_view_ops.h>
#include <ATen/ops/_unsafe_view_native.h>
#include <ATen/ops/_unsafe_view_ops.h>
#include <ATen/ops/var_native.h>
#include <ATen/ops/var_ops.h>
#include <ATen/ops/var_native.h>
#include <ATen/ops/var_ops.h>
#include <ATen/ops/var_native.h>
#include <ATen/ops/var_ops.h>
#include <ATen/ops/var_native.h>
#include <ATen/ops/var_ops.h>
#include <ATen/ops/var_native.h>
#include <ATen/ops/var_ops.h>
#include <ATen/ops/var_native.h>
#include <ATen/ops/var_ops.h>
#include <ATen/ops/var_native.h>
#include <ATen/ops/var_ops.h>
#include <ATen/ops/var_native.h>
#include <ATen/ops/var_ops.h>
#include <ATen/ops/var_mean_native.h>
#include <ATen/ops/var_mean_ops.h>
#include <ATen/ops/var_mean_native.h>
#include <ATen/ops/var_mean_ops.h>
#include <ATen/ops/where_native.h>
#include <ATen/ops/where_ops.h>
#include <ATen/ops/where_native.h>
#include <ATen/ops/where_ops.h>
#include <ATen/ops/_weight_norm_interface_native.h>
#include <ATen/ops/_weight_norm_interface_ops.h>
#include <ATen/ops/_weight_norm_interface_native.h>
#include <ATen/ops/_weight_norm_interface_ops.h>
#include <ATen/ops/_weight_norm_interface_backward_native.h>
#include <ATen/ops/_weight_norm_interface_backward_ops.h>
#include <ATen/ops/_weight_norm_interface_backward_native.h>
#include <ATen/ops/_weight_norm_interface_backward_ops.h>
#include <ATen/ops/zeros_native.h>
#include <ATen/ops/zeros_ops.h>
#include <ATen/ops/zeros_native.h>
#include <ATen/ops/zeros_ops.h>
#include <ATen/ops/_efficientzerotensor_native.h>
#include <ATen/ops/_efficientzerotensor_ops.h>
#include <ATen/ops/_efficientzerotensor_native.h>
#include <ATen/ops/_efficientzerotensor_ops.h>
#include <ATen/ops/zeros_native.h>
#include <ATen/ops/zeros_ops.h>
#include <ATen/ops/zeros_native.h>
#include <ATen/ops/zeros_ops.h>
#include <ATen/ops/zeros_like_native.h>
#include <ATen/ops/zeros_like_ops.h>
#include <ATen/ops/zeros_like_native.h>
#include <ATen/ops/zeros_like_ops.h>
#include <ATen/ops/_standard_gamma_grad_native.h>
#include <ATen/ops/_standard_gamma_grad_ops.h>
#include <ATen/ops/_standard_gamma_grad_native.h>
#include <ATen/ops/_standard_gamma_grad_ops.h>
#include <ATen/ops/_standard_gamma_native.h>
#include <ATen/ops/_standard_gamma_ops.h>
#include <ATen/ops/_standard_gamma_native.h>
#include <ATen/ops/_standard_gamma_ops.h>
#include <ATen/ops/_dirichlet_grad_native.h>
#include <ATen/ops/_dirichlet_grad_ops.h>
#include <ATen/ops/_dirichlet_grad_native.h>
#include <ATen/ops/_dirichlet_grad_ops.h>
#include <ATen/ops/_sample_dirichlet_native.h>
#include <ATen/ops/_sample_dirichlet_ops.h>
#include <ATen/ops/_sample_dirichlet_native.h>
#include <ATen/ops/_sample_dirichlet_ops.h>
#include <ATen/ops/poisson_native.h>
#include <ATen/ops/poisson_ops.h>
#include <ATen/ops/poisson_native.h>
#include <ATen/ops/poisson_ops.h>
#include <ATen/ops/binomial_native.h>
#include <ATen/ops/binomial_ops.h>
#include <ATen/ops/binomial_native.h>
#include <ATen/ops/binomial_ops.h>
#include <ATen/ops/native_norm_native.h>
#include <ATen/ops/native_norm_ops.h>
#include <ATen/ops/native_norm_native.h>
#include <ATen/ops/native_norm_ops.h>
#include <ATen/ops/native_norm_native.h>
#include <ATen/ops/native_norm_ops.h>
#include <ATen/ops/native_norm_native.h>
#include <ATen/ops/native_norm_ops.h>
#include <ATen/ops/_batch_norm_with_update_native.h>
#include <ATen/ops/_batch_norm_with_update_ops.h>
#include <ATen/ops/_batch_norm_with_update_native.h>
#include <ATen/ops/_batch_norm_with_update_ops.h>
#include <ATen/ops/_batch_norm_with_update_native.h>
#include <ATen/ops/_batch_norm_with_update_ops.h>
#include <ATen/ops/_batch_norm_no_update_native.h>
#include <ATen/ops/_batch_norm_no_update_ops.h>
#include <ATen/ops/_batch_norm_no_update_native.h>
#include <ATen/ops/_batch_norm_no_update_ops.h>
#include <ATen/ops/_sparse_sum_native.h>
#include <ATen/ops/_sparse_sum_ops.h>
#include <ATen/ops/_sparse_sum_native.h>
#include <ATen/ops/_sparse_sum_ops.h>
#include <ATen/ops/_sparse_sum_backward_native.h>
#include <ATen/ops/_sparse_sum_backward_ops.h>
#include <ATen/ops/_sparse_sum_backward_native.h>
#include <ATen/ops/_sparse_sum_backward_ops.h>
#include <ATen/ops/_sparse_csr_sum_native.h>
#include <ATen/ops/_sparse_csr_sum_ops.h>
#include <ATen/ops/_sparse_csr_sum_native.h>
#include <ATen/ops/_sparse_csr_sum_ops.h>
#include <ATen/ops/_sparse_csr_prod_native.h>
#include <ATen/ops/_sparse_csr_prod_ops.h>
#include <ATen/ops/_sparse_csr_prod_native.h>
#include <ATen/ops/_sparse_csr_prod_ops.h>
#include <ATen/ops/_sparse_softmax_native.h>
#include <ATen/ops/_sparse_softmax_ops.h>
#include <ATen/ops/_sparse_softmax_native.h>
#include <ATen/ops/_sparse_softmax_ops.h>
#include <ATen/ops/_sparse_softmax_backward_data_native.h>
#include <ATen/ops/_sparse_softmax_backward_data_ops.h>
#include <ATen/ops/_sparse_softmax_backward_data_native.h>
#include <ATen/ops/_sparse_softmax_backward_data_ops.h>
#include <ATen/ops/_sparse_log_softmax_native.h>
#include <ATen/ops/_sparse_log_softmax_ops.h>
#include <ATen/ops/_sparse_log_softmax_native.h>
#include <ATen/ops/_sparse_log_softmax_ops.h>
#include <ATen/ops/_sparse_log_softmax_backward_data_native.h>
#include <ATen/ops/_sparse_log_softmax_backward_data_ops.h>
#include <ATen/ops/_sparse_log_softmax_backward_data_native.h>
#include <ATen/ops/_sparse_log_softmax_backward_data_ops.h>
#include <ATen/ops/_spdiags_native.h>
#include <ATen/ops/_spdiags_ops.h>
#include <ATen/ops/_spdiags_native.h>
#include <ATen/ops/_spdiags_ops.h>
#include <ATen/ops/norm_native.h>
#include <ATen/ops/norm_ops.h>
#include <ATen/ops/norm_native.h>
#include <ATen/ops/norm_ops.h>
#include <ATen/ops/norm_native.h>
#include <ATen/ops/norm_ops.h>
#include <ATen/ops/norm_native.h>
#include <ATen/ops/norm_ops.h>
#include <ATen/ops/norm_native.h>
#include <ATen/ops/norm_ops.h>
#include <ATen/ops/norm_native.h>
#include <ATen/ops/norm_ops.h>
#include <ATen/ops/norm_native.h>
#include <ATen/ops/norm_ops.h>
#include <ATen/ops/norm_native.h>
#include <ATen/ops/norm_ops.h>
#include <ATen/ops/norm_native.h>
#include <ATen/ops/norm_ops.h>
#include <ATen/ops/norm_native.h>
#include <ATen/ops/norm_ops.h>
#include <ATen/ops/norm_native.h>
#include <ATen/ops/norm_ops.h>
#include <ATen/ops/norm_native.h>
#include <ATen/ops/norm_ops.h>
#include <ATen/ops/frexp_native.h>
#include <ATen/ops/frexp_ops.h>
#include <ATen/ops/frexp_native.h>
#include <ATen/ops/frexp_ops.h>
#include <ATen/ops/frobenius_norm_native.h>
#include <ATen/ops/frobenius_norm_ops.h>
#include <ATen/ops/frobenius_norm_native.h>
#include <ATen/ops/frobenius_norm_ops.h>
#include <ATen/ops/nuclear_norm_native.h>
#include <ATen/ops/nuclear_norm_ops.h>
#include <ATen/ops/nuclear_norm_native.h>
#include <ATen/ops/nuclear_norm_ops.h>
#include <ATen/ops/nuclear_norm_native.h>
#include <ATen/ops/nuclear_norm_ops.h>
#include <ATen/ops/nuclear_norm_native.h>
#include <ATen/ops/nuclear_norm_ops.h>
#include <ATen/ops/clone_native.h>
#include <ATen/ops/clone_ops.h>
#include <ATen/ops/clone_native.h>
#include <ATen/ops/clone_ops.h>
#include <ATen/ops/resize_as_native.h>
#include <ATen/ops/resize_as_ops.h>
#include <ATen/ops/resize_as_native.h>
#include <ATen/ops/resize_as_ops.h>
#include <ATen/ops/resize_as_native.h>
#include <ATen/ops/resize_as_ops.h>
#include <ATen/ops/resize_as_sparse_native.h>
#include <ATen/ops/resize_as_sparse_ops.h>
#include <ATen/ops/resize_as_sparse_native.h>
#include <ATen/ops/resize_as_sparse_ops.h>
#include <ATen/ops/resize_as_sparse_native.h>
#include <ATen/ops/resize_as_sparse_ops.h>
#include <ATen/ops/zero_native.h>
#include <ATen/ops/zero_ops.h>
#include <ATen/ops/zero_native.h>
#include <ATen/ops/zero_ops.h>
#include <ATen/ops/zero_native.h>
#include <ATen/ops/zero_ops.h>
#include <ATen/ops/sub_native.h>
#include <ATen/ops/sub_ops.h>
#include <ATen/ops/sub_native.h>
#include <ATen/ops/sub_ops.h>
#include <ATen/ops/sub_native.h>
#include <ATen/ops/sub_ops.h>
#include <ATen/ops/sub_native.h>
#include <ATen/ops/sub_ops.h>
#include <ATen/ops/sub_native.h>
#include <ATen/ops/sub_ops.h>
#include <ATen/ops/sub_native.h>
#include <ATen/ops/sub_ops.h>
#include <ATen/ops/subtract_native.h>
#include <ATen/ops/subtract_ops.h>
#include <ATen/ops/subtract_native.h>
#include <ATen/ops/subtract_ops.h>
#include <ATen/ops/subtract_native.h>
#include <ATen/ops/subtract_ops.h>
#include <ATen/ops/rsub_native.h>
#include <ATen/ops/rsub_ops.h>
#include <ATen/ops/rsub_native.h>
#include <ATen/ops/rsub_ops.h>
#include <ATen/ops/heaviside_native.h>
#include <ATen/ops/heaviside_ops.h>
#include <ATen/ops/heaviside_native.h>
#include <ATen/ops/heaviside_ops.h>
#include <ATen/ops/heaviside_native.h>
#include <ATen/ops/heaviside_ops.h>
#include <ATen/ops/rsub_native.h>
#include <ATen/ops/rsub_ops.h>
#include <ATen/ops/rsub_native.h>
#include <ATen/ops/rsub_ops.h>
#include <ATen/ops/_sparse_addmm_native.h>
#include <ATen/ops/_sparse_addmm_ops.h>
#include <ATen/ops/_sparse_addmm_native.h>
#include <ATen/ops/_sparse_addmm_ops.h>
#include <ATen/ops/sparse_sampled_addmm_native.h>
#include <ATen/ops/sparse_sampled_addmm_ops.h>
#include <ATen/ops/sparse_sampled_addmm_native.h>
#include <ATen/ops/sparse_sampled_addmm_ops.h>
#include <ATen/ops/addmm_native.h>
#include <ATen/ops/addmm_ops.h>
#include <ATen/ops/addmm_native.h>
#include <ATen/ops/addmm_ops.h>
#include <ATen/ops/addmm_native.h>
#include <ATen/ops/addmm_ops.h>
#include <ATen/ops/_addmm_activation_native.h>
#include <ATen/ops/_addmm_activation_ops.h>
#include <ATen/ops/_addmm_activation_native.h>
#include <ATen/ops/_addmm_activation_ops.h>
#include <ATen/ops/_scaled_mm_native.h>
#include <ATen/ops/_scaled_mm_ops.h>
#include <ATen/ops/_scaled_mm_native.h>
#include <ATen/ops/_scaled_mm_ops.h>
#include <ATen/ops/sparse_coo_tensor_native.h>
#include <ATen/ops/sparse_coo_tensor_ops.h>
#include <ATen/ops/sparse_coo_tensor_native.h>
#include <ATen/ops/sparse_coo_tensor_ops.h>
#include <ATen/ops/_sparse_coo_tensor_with_dims_native.h>
#include <ATen/ops/_sparse_coo_tensor_with_dims_ops.h>
#include <ATen/ops/_sparse_coo_tensor_with_dims_native.h>
#include <ATen/ops/_sparse_coo_tensor_with_dims_ops.h>
#include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors_native.h>
#include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors_ops.h>
#include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors_native.h>
#include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors_ops.h>
#include <ATen/ops/sparse_resize_native.h>
#include <ATen/ops/sparse_resize_ops.h>
#include <ATen/ops/sparse_resize_native.h>
#include <ATen/ops/sparse_resize_ops.h>
#include <ATen/ops/sparse_resize_native.h>
#include <ATen/ops/sparse_resize_ops.h>
#include <ATen/ops/sparse_resize_and_clear_native.h>
#include <ATen/ops/sparse_resize_and_clear_ops.h>
#include <ATen/ops/sparse_resize_and_clear_native.h>
#include <ATen/ops/sparse_resize_and_clear_ops.h>
#include <ATen/ops/sparse_resize_and_clear_native.h>
#include <ATen/ops/sparse_resize_and_clear_ops.h>
#include <ATen/ops/sparse_mask_native.h>
#include <ATen/ops/sparse_mask_ops.h>
#include <ATen/ops/sparse_mask_native.h>
#include <ATen/ops/sparse_mask_ops.h>
#include <ATen/ops/_sparse_mask_projection_native.h>
#include <ATen/ops/_sparse_mask_projection_ops.h>
#include <ATen/ops/_sparse_mask_projection_native.h>
#include <ATen/ops/_sparse_mask_projection_ops.h>
#include <ATen/ops/_to_dense_native.h>
#include <ATen/ops/_to_dense_ops.h>
#include <ATen/ops/_to_dense_native.h>
#include <ATen/ops/_to_dense_ops.h>
#include <ATen/ops/_coalesce_native.h>
#include <ATen/ops/_coalesce_ops.h>
#include <ATen/ops/_coalesce_native.h>
#include <ATen/ops/_coalesce_ops.h>
#include <ATen/ops/_coalesced_native.h>
#include <ATen/ops/_coalesced_ops.h>
#include <ATen/ops/_coalesced_native.h>
#include <ATen/ops/_coalesced_ops.h>
#include <ATen/ops/_coalesced_native.h>
#include <ATen/ops/_coalesced_ops.h>
#include <ATen/ops/hspmm_native.h>
#include <ATen/ops/hspmm_ops.h>
#include <ATen/ops/hspmm_native.h>
#include <ATen/ops/hspmm_ops.h>
#include <ATen/ops/copy_sparse_to_sparse_native.h>
#include <ATen/ops/copy_sparse_to_sparse_ops.h>
#include <ATen/ops/copy_sparse_to_sparse_native.h>
#include <ATen/ops/copy_sparse_to_sparse_ops.h>
#include <ATen/ops/copy_sparse_to_sparse_native.h>
#include <ATen/ops/copy_sparse_to_sparse_ops.h>
#include <ATen/ops/_to_sparse_native.h>
#include <ATen/ops/_to_sparse_ops.h>
#include <ATen/ops/_to_sparse_native.h>
#include <ATen/ops/_to_sparse_ops.h>
#include <ATen/ops/_to_sparse_native.h>
#include <ATen/ops/_to_sparse_ops.h>
#include <ATen/ops/_to_sparse_native.h>
#include <ATen/ops/_to_sparse_ops.h>
#include <ATen/ops/_to_sparse_csr_native.h>
#include <ATen/ops/_to_sparse_csr_ops.h>
#include <ATen/ops/_to_sparse_csr_native.h>
#include <ATen/ops/_to_sparse_csr_ops.h>
#include <ATen/ops/_to_sparse_csc_native.h>
#include <ATen/ops/_to_sparse_csc_ops.h>
#include <ATen/ops/_to_sparse_csc_native.h>
#include <ATen/ops/_to_sparse_csc_ops.h>
#include <ATen/ops/_to_sparse_bsr_native.h>
#include <ATen/ops/_to_sparse_bsr_ops.h>
#include <ATen/ops/_to_sparse_bsr_native.h>
#include <ATen/ops/_to_sparse_bsr_ops.h>
#include <ATen/ops/_to_sparse_bsc_native.h>
#include <ATen/ops/_to_sparse_bsc_ops.h>
#include <ATen/ops/_to_sparse_bsc_native.h>
#include <ATen/ops/_to_sparse_bsc_ops.h>
#include <ATen/ops/to_mkldnn_native.h>
#include <ATen/ops/to_mkldnn_ops.h>
#include <ATen/ops/to_mkldnn_native.h>
#include <ATen/ops/to_mkldnn_ops.h>
#include <ATen/ops/mkldnn_reorder_conv2d_weight_native.h>
#include <ATen/ops/mkldnn_reorder_conv2d_weight_ops.h>
#include <ATen/ops/mkldnn_reorder_conv2d_weight_native.h>
#include <ATen/ops/mkldnn_reorder_conv2d_weight_ops.h>
#include <ATen/ops/mkldnn_reorder_conv3d_weight_native.h>
#include <ATen/ops/mkldnn_reorder_conv3d_weight_ops.h>
#include <ATen/ops/mkldnn_reorder_conv3d_weight_native.h>
#include <ATen/ops/mkldnn_reorder_conv3d_weight_ops.h>
#include <ATen/ops/quantize_per_tensor_dynamic_native.h>
#include <ATen/ops/quantize_per_tensor_dynamic_ops.h>
#include <ATen/ops/quantize_per_tensor_dynamic_native.h>
#include <ATen/ops/quantize_per_tensor_dynamic_ops.h>
#include <ATen/ops/quantize_per_tensor_native.h>
#include <ATen/ops/quantize_per_tensor_ops.h>
#include <ATen/ops/quantize_per_tensor_native.h>
#include <ATen/ops/quantize_per_tensor_ops.h>
#include <ATen/ops/quantize_per_tensor_native.h>
#include <ATen/ops/quantize_per_tensor_ops.h>
#include <ATen/ops/quantize_per_tensor_native.h>
#include <ATen/ops/quantize_per_tensor_ops.h>
#include <ATen/ops/quantize_per_tensor_native.h>
#include <ATen/ops/quantize_per_tensor_ops.h>
#include <ATen/ops/quantize_per_tensor_native.h>
#include <ATen/ops/quantize_per_tensor_ops.h>
#include <ATen/ops/quantize_per_channel_native.h>
#include <ATen/ops/quantize_per_channel_ops.h>
#include <ATen/ops/quantize_per_channel_native.h>
#include <ATen/ops/quantize_per_channel_ops.h>
#include <ATen/ops/dequantize_native.h>
#include <ATen/ops/dequantize_ops.h>
#include <ATen/ops/dequantize_native.h>
#include <ATen/ops/dequantize_ops.h>
#include <ATen/ops/dequantize_native.h>
#include <ATen/ops/dequantize_ops.h>
#include <ATen/ops/dequantize_native.h>
#include <ATen/ops/dequantize_ops.h>
#include <ATen/ops/q_per_channel_scales_native.h>
#include <ATen/ops/q_per_channel_scales_ops.h>
#include <ATen/ops/q_per_channel_scales_native.h>
#include <ATen/ops/q_per_channel_scales_ops.h>
#include <ATen/ops/q_per_channel_zero_points_native.h>
#include <ATen/ops/q_per_channel_zero_points_ops.h>
#include <ATen/ops/q_per_channel_zero_points_native.h>
#include <ATen/ops/q_per_channel_zero_points_ops.h>
#include <ATen/ops/int_repr_native.h>
#include <ATen/ops/int_repr_ops.h>
#include <ATen/ops/int_repr_native.h>
#include <ATen/ops/int_repr_ops.h>
#include <ATen/ops/_make_per_tensor_quantized_tensor_native.h>
#include <ATen/ops/_make_per_tensor_quantized_tensor_ops.h>
#include <ATen/ops/_make_per_tensor_quantized_tensor_native.h>
#include <ATen/ops/_make_per_tensor_quantized_tensor_ops.h>
#include <ATen/ops/_make_per_channel_quantized_tensor_native.h>
#include <ATen/ops/_make_per_channel_quantized_tensor_ops.h>
#include <ATen/ops/_make_per_channel_quantized_tensor_native.h>
#include <ATen/ops/_make_per_channel_quantized_tensor_ops.h>
#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_native.h>
#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_ops.h>
#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_native.h>
#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_ops.h>
#include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_native.h>
#include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_ops.h>
#include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_native.h>
#include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_ops.h>
#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_native.h>
#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_ops.h>
#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_native.h>
#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_ops.h>
#include <ATen/ops/fake_quantize_per_channel_affine_cachemask_native.h>
#include <ATen/ops/fake_quantize_per_channel_affine_cachemask_ops.h>
#include <ATen/ops/fake_quantize_per_channel_affine_cachemask_native.h>
#include <ATen/ops/fake_quantize_per_channel_affine_cachemask_ops.h>
#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_native.h>
#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_ops.h>
#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_native.h>
#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_ops.h>
#include <ATen/ops/_fused_moving_avg_obs_fq_helper_native.h>
#include <ATen/ops/_fused_moving_avg_obs_fq_helper_ops.h>
#include <ATen/ops/_fused_moving_avg_obs_fq_helper_native.h>
#include <ATen/ops/_fused_moving_avg_obs_fq_helper_ops.h>
#include <ATen/ops/_fused_moving_avg_obs_fq_helper_native.h>
#include <ATen/ops/_fused_moving_avg_obs_fq_helper_ops.h>
#include <ATen/ops/_to_copy_native.h>
#include <ATen/ops/_to_copy_ops.h>
#include <ATen/ops/_to_copy_native.h>
#include <ATen/ops/_to_copy_ops.h>
#include <ATen/ops/_lstm_mps_native.h>
#include <ATen/ops/_lstm_mps_ops.h>
#include <ATen/ops/_lstm_mps_native.h>
#include <ATen/ops/_lstm_mps_ops.h>
#include <ATen/ops/lstm_mps_backward_native.h>
#include <ATen/ops/lstm_mps_backward_ops.h>
#include <ATen/ops/lstm_mps_backward_native.h>
#include <ATen/ops/lstm_mps_backward_ops.h>
#include <ATen/ops/_thnn_fused_lstm_cell_native.h>
#include <ATen/ops/_thnn_fused_lstm_cell_ops.h>
#include <ATen/ops/_thnn_fused_lstm_cell_native.h>
#include <ATen/ops/_thnn_fused_lstm_cell_ops.h>
#include <ATen/ops/_thnn_fused_lstm_cell_backward_impl_native.h>
#include <ATen/ops/_thnn_fused_lstm_cell_backward_impl_ops.h>
#include <ATen/ops/_thnn_fused_lstm_cell_backward_impl_native.h>
#include <ATen/ops/_thnn_fused_lstm_cell_backward_impl_ops.h>
#include <ATen/ops/_thnn_fused_gru_cell_native.h>
#include <ATen/ops/_thnn_fused_gru_cell_ops.h>
#include <ATen/ops/_thnn_fused_gru_cell_native.h>
#include <ATen/ops/_thnn_fused_gru_cell_ops.h>
#include <ATen/ops/_thnn_fused_gru_cell_backward_native.h>
#include <ATen/ops/_thnn_fused_gru_cell_backward_ops.h>
#include <ATen/ops/_thnn_fused_gru_cell_backward_native.h>
#include <ATen/ops/_thnn_fused_gru_cell_backward_ops.h>
#include <ATen/ops/_pack_padded_sequence_native.h>
#include <ATen/ops/_pack_padded_sequence_ops.h>
#include <ATen/ops/_pack_padded_sequence_native.h>
#include <ATen/ops/_pack_padded_sequence_ops.h>
#include <ATen/ops/set_native.h>
#include <ATen/ops/set_ops.h>
#include <ATen/ops/set_native.h>
#include <ATen/ops/set_ops.h>
#include <ATen/ops/set_native.h>
#include <ATen/ops/set_ops.h>
#include <ATen/ops/set_native.h>
#include <ATen/ops/set_ops.h>
#include <ATen/ops/set_native.h>
#include <ATen/ops/set_ops.h>
#include <ATen/ops/set_native.h>
#include <ATen/ops/set_ops.h>
#include <ATen/ops/set_native.h>
#include <ATen/ops/set_ops.h>
#include <ATen/ops/set_native.h>
#include <ATen/ops/set_ops.h>
#include <ATen/ops/set_native.h>
#include <ATen/ops/set_ops.h>
#include <ATen/ops/set_native.h>
#include <ATen/ops/set_ops.h>
#include <ATen/ops/set_native.h>
#include <ATen/ops/set_ops.h>
#include <ATen/ops/set_native.h>
#include <ATen/ops/set_ops.h>
#include <ATen/ops/lift_native.h>
#include <ATen/ops/lift_ops.h>
#include <ATen/ops/lift_native.h>
#include <ATen/ops/lift_ops.h>
#include <ATen/ops/lift_fresh_copy_native.h>
#include <ATen/ops/lift_fresh_copy_ops.h>
#include <ATen/ops/lift_fresh_copy_native.h>
#include <ATen/ops/lift_fresh_copy_ops.h>
#include <ATen/ops/masked_fill_native.h>
#include <ATen/ops/masked_fill_ops.h>
#include <ATen/ops/masked_fill_native.h>
#include <ATen/ops/masked_fill_ops.h>
#include <ATen/ops/masked_fill_native.h>
#include <ATen/ops/masked_fill_ops.h>
#include <ATen/ops/masked_fill_native.h>
#include <ATen/ops/masked_fill_ops.h>
#include <ATen/ops/masked_fill_native.h>
#include <ATen/ops/masked_fill_ops.h>
#include <ATen/ops/masked_fill_native.h>
#include <ATen/ops/masked_fill_ops.h>
#include <ATen/ops/masked_scatter_native.h>
#include <ATen/ops/masked_scatter_ops.h>
#include <ATen/ops/masked_scatter_native.h>
#include <ATen/ops/masked_scatter_ops.h>
#include <ATen/ops/masked_scatter_native.h>
#include <ATen/ops/masked_scatter_ops.h>
#include <ATen/ops/_masked_softmax_native.h>
#include <ATen/ops/_masked_softmax_ops.h>
#include <ATen/ops/_masked_softmax_native.h>
#include <ATen/ops/_masked_softmax_ops.h>
#include <ATen/ops/_masked_softmax_backward_native.h>
#include <ATen/ops/_masked_softmax_backward_ops.h>
#include <ATen/ops/_masked_softmax_backward_native.h>
#include <ATen/ops/_masked_softmax_backward_ops.h>
#include <ATen/ops/put_native.h>
#include <ATen/ops/put_ops.h>
#include <ATen/ops/put_native.h>
#include <ATen/ops/put_ops.h>
#include <ATen/ops/put_native.h>
#include <ATen/ops/put_ops.h>
#include <ATen/ops/index_add_native.h>
#include <ATen/ops/index_add_ops.h>
#include <ATen/ops/index_add_native.h>
#include <ATen/ops/index_add_ops.h>
#include <ATen/ops/index_add_native.h>
#include <ATen/ops/index_add_ops.h>
#include <ATen/ops/index_reduce_native.h>
#include <ATen/ops/index_reduce_ops.h>
#include <ATen/ops/index_reduce_native.h>
#include <ATen/ops/index_reduce_ops.h>
#include <ATen/ops/index_reduce_native.h>
#include <ATen/ops/index_reduce_ops.h>
#include <ATen/ops/index_fill_native.h>
#include <ATen/ops/index_fill_ops.h>
#include <ATen/ops/index_fill_native.h>
#include <ATen/ops/index_fill_ops.h>
#include <ATen/ops/index_fill_native.h>
#include <ATen/ops/index_fill_ops.h>
#include <ATen/ops/index_fill_native.h>
#include <ATen/ops/index_fill_ops.h>
#include <ATen/ops/index_fill_native.h>
#include <ATen/ops/index_fill_ops.h>
#include <ATen/ops/index_fill_native.h>
#include <ATen/ops/index_fill_ops.h>
#include <ATen/ops/scatter_native.h>
#include <ATen/ops/scatter_ops.h>
#include <ATen/ops/scatter_native.h>
#include <ATen/ops/scatter_ops.h>
#include <ATen/ops/scatter_native.h>
#include <ATen/ops/scatter_ops.h>
#include <ATen/ops/scatter_native.h>
#include <ATen/ops/scatter_ops.h>
#include <ATen/ops/scatter_native.h>
#include <ATen/ops/scatter_ops.h>
#include <ATen/ops/scatter_native.h>
#include <ATen/ops/scatter_ops.h>
#include <ATen/ops/scatter_native.h>
#include <ATen/ops/scatter_ops.h>
#include <ATen/ops/scatter_native.h>
#include <ATen/ops/scatter_ops.h>
#include <ATen/ops/scatter_native.h>
#include <ATen/ops/scatter_ops.h>
#include <ATen/ops/scatter_native.h>
#include <ATen/ops/scatter_ops.h>
#include <ATen/ops/scatter_native.h>
#include <ATen/ops/scatter_ops.h>
#include <ATen/ops/scatter_native.h>
#include <ATen/ops/scatter_ops.h>
#include <ATen/ops/scatter_add_native.h>
#include <ATen/ops/scatter_add_ops.h>
#include <ATen/ops/scatter_add_native.h>
#include <ATen/ops/scatter_add_ops.h>
#include <ATen/ops/scatter_add_native.h>
#include <ATen/ops/scatter_add_ops.h>
#include <ATen/ops/scatter_reduce_native.h>
#include <ATen/ops/scatter_reduce_ops.h>
#include <ATen/ops/scatter_reduce_native.h>
#include <ATen/ops/scatter_reduce_ops.h>
#include <ATen/ops/scatter_reduce_native.h>
#include <ATen/ops/scatter_reduce_ops.h>
#include <ATen/ops/eq_native.h>
#include <ATen/ops/eq_ops.h>
#include <ATen/ops/eq_native.h>
#include <ATen/ops/eq_ops.h>
#include <ATen/ops/eq_native.h>
#include <ATen/ops/eq_ops.h>
#include <ATen/ops/eq_native.h>
#include <ATen/ops/eq_ops.h>
#include <ATen/ops/eq_native.h>
#include <ATen/ops/eq_ops.h>
#include <ATen/ops/eq_native.h>
#include <ATen/ops/eq_ops.h>
#include <ATen/ops/bitwise_and_native.h>
#include <ATen/ops/bitwise_and_ops.h>
#include <ATen/ops/bitwise_and_native.h>
#include <ATen/ops/bitwise_and_ops.h>
#include <ATen/ops/bitwise_and_native.h>
#include <ATen/ops/bitwise_and_ops.h>
#include <ATen/ops/bitwise_and_native.h>
#include <ATen/ops/bitwise_and_ops.h>
#include <ATen/ops/bitwise_and_native.h>
#include <ATen/ops/bitwise_and_ops.h>
#include <ATen/ops/bitwise_and_native.h>
#include <ATen/ops/bitwise_and_ops.h>
#include <ATen/ops/bitwise_and_native.h>
#include <ATen/ops/bitwise_and_ops.h>
#include <ATen/ops/bitwise_and_native.h>
#include <ATen/ops/bitwise_and_ops.h>
#include <ATen/ops/bitwise_or_native.h>
#include <ATen/ops/bitwise_or_ops.h>
#include <ATen/ops/bitwise_or_native.h>
#include <ATen/ops/bitwise_or_ops.h>
#include <ATen/ops/bitwise_or_native.h>
#include <ATen/ops/bitwise_or_ops.h>
#include <ATen/ops/bitwise_or_native.h>
#include <ATen/ops/bitwise_or_ops.h>
#include <ATen/ops/bitwise_or_native.h>
#include <ATen/ops/bitwise_or_ops.h>
#include <ATen/ops/bitwise_or_native.h>
#include <ATen/ops/bitwise_or_ops.h>
#include <ATen/ops/bitwise_or_native.h>
#include <ATen/ops/bitwise_or_ops.h>
#include <ATen/ops/bitwise_or_native.h>
#include <ATen/ops/bitwise_or_ops.h>
#include <ATen/ops/bitwise_xor_native.h>
#include <ATen/ops/bitwise_xor_ops.h>
#include <ATen/ops/bitwise_xor_native.h>
#include <ATen/ops/bitwise_xor_ops.h>
#include <ATen/ops/bitwise_xor_native.h>
#include <ATen/ops/bitwise_xor_ops.h>
#include <ATen/ops/bitwise_xor_native.h>
#include <ATen/ops/bitwise_xor_ops.h>
#include <ATen/ops/bitwise_xor_native.h>
#include <ATen/ops/bitwise_xor_ops.h>
#include <ATen/ops/bitwise_xor_native.h>
#include <ATen/ops/bitwise_xor_ops.h>
#include <ATen/ops/bitwise_xor_native.h>
#include <ATen/ops/bitwise_xor_ops.h>
#include <ATen/ops/bitwise_xor_native.h>
#include <ATen/ops/bitwise_xor_ops.h>
#include <ATen/ops/lshift_native.h>
#include <ATen/ops/lshift_ops.h>
#include <ATen/ops/lshift_native.h>
#include <ATen/ops/lshift_ops.h>
#include <ATen/ops/lshift_native.h>
#include <ATen/ops/lshift_ops.h>
#include <ATen/ops/lshift_native.h>
#include <ATen/ops/lshift_ops.h>
#include <ATen/ops/lshift_native.h>
#include <ATen/ops/lshift_ops.h>
#include <ATen/ops/lshift_native.h>
#include <ATen/ops/lshift_ops.h>
#include <ATen/ops/bitwise_left_shift_native.h>
#include <ATen/ops/bitwise_left_shift_ops.h>
#include <ATen/ops/bitwise_left_shift_native.h>
#include <ATen/ops/bitwise_left_shift_ops.h>
#include <ATen/ops/bitwise_left_shift_native.h>
#include <ATen/ops/bitwise_left_shift_ops.h>
#include <ATen/ops/bitwise_left_shift_native.h>
#include <ATen/ops/bitwise_left_shift_ops.h>
#include <ATen/ops/bitwise_left_shift_native.h>
#include <ATen/ops/bitwise_left_shift_ops.h>
#include <ATen/ops/bitwise_left_shift_native.h>
#include <ATen/ops/bitwise_left_shift_ops.h>
#include <ATen/ops/bitwise_left_shift_native.h>
#include <ATen/ops/bitwise_left_shift_ops.h>
#include <ATen/ops/bitwise_left_shift_native.h>
#include <ATen/ops/bitwise_left_shift_ops.h>
#include <ATen/ops/rshift_native.h>
#include <ATen/ops/rshift_ops.h>
#include <ATen/ops/rshift_native.h>
#include <ATen/ops/rshift_ops.h>
#include <ATen/ops/rshift_native.h>
#include <ATen/ops/rshift_ops.h>
#include <ATen/ops/rshift_native.h>
#include <ATen/ops/rshift_ops.h>
#include <ATen/ops/rshift_native.h>
#include <ATen/ops/rshift_ops.h>
#include <ATen/ops/rshift_native.h>
#include <ATen/ops/rshift_ops.h>
#include <ATen/ops/bitwise_right_shift_native.h>
#include <ATen/ops/bitwise_right_shift_ops.h>
#include <ATen/ops/bitwise_right_shift_native.h>
#include <ATen/ops/bitwise_right_shift_ops.h>
#include <ATen/ops/bitwise_right_shift_native.h>
#include <ATen/ops/bitwise_right_shift_ops.h>
#include <ATen/ops/bitwise_right_shift_native.h>
#include <ATen/ops/bitwise_right_shift_ops.h>
#include <ATen/ops/bitwise_right_shift_native.h>
#include <ATen/ops/bitwise_right_shift_ops.h>
#include <ATen/ops/bitwise_right_shift_native.h>
#include <ATen/ops/bitwise_right_shift_ops.h>
#include <ATen/ops/bitwise_right_shift_native.h>
#include <ATen/ops/bitwise_right_shift_ops.h>
#include <ATen/ops/bitwise_right_shift_native.h>
#include <ATen/ops/bitwise_right_shift_ops.h>
#include <ATen/ops/tril_native.h>
#include <ATen/ops/tril_ops.h>
#include <ATen/ops/tril_native.h>
#include <ATen/ops/tril_ops.h>
#include <ATen/ops/tril_native.h>
#include <ATen/ops/tril_ops.h>
#include <ATen/ops/triu_native.h>
#include <ATen/ops/triu_ops.h>
#include <ATen/ops/triu_native.h>
#include <ATen/ops/triu_ops.h>
#include <ATen/ops/triu_native.h>
#include <ATen/ops/triu_ops.h>
#include <ATen/ops/digamma_native.h>
#include <ATen/ops/digamma_ops.h>
#include <ATen/ops/digamma_native.h>
#include <ATen/ops/digamma_ops.h>
#include <ATen/ops/digamma_native.h>
#include <ATen/ops/digamma_ops.h>
#include <ATen/ops/lerp_native.h>
#include <ATen/ops/lerp_ops.h>
#include <ATen/ops/lerp_native.h>
#include <ATen/ops/lerp_ops.h>
#include <ATen/ops/lerp_native.h>
#include <ATen/ops/lerp_ops.h>
#include <ATen/ops/lerp_native.h>
#include <ATen/ops/lerp_ops.h>
#include <ATen/ops/lerp_native.h>
#include <ATen/ops/lerp_ops.h>
#include <ATen/ops/lerp_native.h>
#include <ATen/ops/lerp_ops.h>
#include <ATen/ops/addbmm_native.h>
#include <ATen/ops/addbmm_ops.h>
#include <ATen/ops/addbmm_native.h>
#include <ATen/ops/addbmm_ops.h>
#include <ATen/ops/addbmm_native.h>
#include <ATen/ops/addbmm_ops.h>
#include <ATen/ops/random_native.h>
#include <ATen/ops/random_ops.h>
#include <ATen/ops/random_native.h>
#include <ATen/ops/random_ops.h>
#include <ATen/ops/random_native.h>
#include <ATen/ops/random_ops.h>
#include <ATen/ops/random_native.h>
#include <ATen/ops/random_ops.h>
#include <ATen/ops/random_native.h>
#include <ATen/ops/random_ops.h>
#include <ATen/ops/random_native.h>
#include <ATen/ops/random_ops.h>
#include <ATen/ops/random_native.h>
#include <ATen/ops/random_ops.h>
#include <ATen/ops/random_native.h>
#include <ATen/ops/random_ops.h>
#include <ATen/ops/random_native.h>
#include <ATen/ops/random_ops.h>
#include <ATen/ops/uniform_native.h>
#include <ATen/ops/uniform_ops.h>
#include <ATen/ops/uniform_native.h>
#include <ATen/ops/uniform_ops.h>
#include <ATen/ops/uniform_native.h>
#include <ATen/ops/uniform_ops.h>
#include <ATen/ops/cauchy_native.h>
#include <ATen/ops/cauchy_ops.h>
#include <ATen/ops/cauchy_native.h>
#include <ATen/ops/cauchy_ops.h>
#include <ATen/ops/cauchy_native.h>
#include <ATen/ops/cauchy_ops.h>
#include <ATen/ops/log_normal_native.h>
#include <ATen/ops/log_normal_ops.h>
#include <ATen/ops/log_normal_native.h>
#include <ATen/ops/log_normal_ops.h>
#include <ATen/ops/log_normal_native.h>
#include <ATen/ops/log_normal_ops.h>
#include <ATen/ops/exponential_native.h>
#include <ATen/ops/exponential_ops.h>
#include <ATen/ops/exponential_native.h>
#include <ATen/ops/exponential_ops.h>
#include <ATen/ops/exponential_native.h>
#include <ATen/ops/exponential_ops.h>
#include <ATen/ops/geometric_native.h>
#include <ATen/ops/geometric_ops.h>
#include <ATen/ops/geometric_native.h>
#include <ATen/ops/geometric_ops.h>
#include <ATen/ops/geometric_native.h>
#include <ATen/ops/geometric_ops.h>
#include <ATen/ops/diag_native.h>
#include <ATen/ops/diag_ops.h>
#include <ATen/ops/diag_native.h>
#include <ATen/ops/diag_ops.h>
#include <ATen/ops/cross_native.h>
#include <ATen/ops/cross_ops.h>
#include <ATen/ops/cross_native.h>
#include <ATen/ops/cross_ops.h>
#include <ATen/ops/tril_indices_native.h>
#include <ATen/ops/tril_indices_ops.h>
#include <ATen/ops/tril_indices_native.h>
#include <ATen/ops/tril_indices_ops.h>
#include <ATen/ops/triu_indices_native.h>
#include <ATen/ops/triu_indices_ops.h>
#include <ATen/ops/triu_indices_native.h>
#include <ATen/ops/triu_indices_ops.h>
#include <ATen/ops/trace_native.h>
#include <ATen/ops/trace_ops.h>
#include <ATen/ops/trace_native.h>
#include <ATen/ops/trace_ops.h>
#include <ATen/ops/ne_native.h>
#include <ATen/ops/ne_ops.h>
#include <ATen/ops/ne_native.h>
#include <ATen/ops/ne_ops.h>
#include <ATen/ops/ne_native.h>
#include <ATen/ops/ne_ops.h>
#include <ATen/ops/ne_native.h>
#include <ATen/ops/ne_ops.h>
#include <ATen/ops/ne_native.h>
#include <ATen/ops/ne_ops.h>
#include <ATen/ops/ne_native.h>
#include <ATen/ops/ne_ops.h>
#include <ATen/ops/not_equal_native.h>
#include <ATen/ops/not_equal_ops.h>
#include <ATen/ops/not_equal_native.h>
#include <ATen/ops/not_equal_ops.h>
#include <ATen/ops/not_equal_native.h>
#include <ATen/ops/not_equal_ops.h>
#include <ATen/ops/not_equal_native.h>
#include <ATen/ops/not_equal_ops.h>
#include <ATen/ops/not_equal_native.h>
#include <ATen/ops/not_equal_ops.h>
#include <ATen/ops/not_equal_native.h>
#include <ATen/ops/not_equal_ops.h>
#include <ATen/ops/ge_native.h>
#include <ATen/ops/ge_ops.h>
#include <ATen/ops/ge_native.h>
#include <ATen/ops/ge_ops.h>
#include <ATen/ops/ge_native.h>
#include <ATen/ops/ge_ops.h>
#include <ATen/ops/ge_native.h>
#include <ATen/ops/ge_ops.h>
#include <ATen/ops/ge_native.h>
#include <ATen/ops/ge_ops.h>
#include <ATen/ops/ge_native.h>
#include <ATen/ops/ge_ops.h>
#include <ATen/ops/greater_equal_native.h>
#include <ATen/ops/greater_equal_ops.h>
#include <ATen/ops/greater_equal_native.h>
#include <ATen/ops/greater_equal_ops.h>
#include <ATen/ops/greater_equal_native.h>
#include <ATen/ops/greater_equal_ops.h>
#include <ATen/ops/greater_equal_native.h>
#include <ATen/ops/greater_equal_ops.h>
#include <ATen/ops/greater_equal_native.h>
#include <ATen/ops/greater_equal_ops.h>
#include <ATen/ops/greater_equal_native.h>
#include <ATen/ops/greater_equal_ops.h>
#include <ATen/ops/le_native.h>
#include <ATen/ops/le_ops.h>
#include <ATen/ops/le_native.h>
#include <ATen/ops/le_ops.h>
#include <ATen/ops/le_native.h>
#include <ATen/ops/le_ops.h>
#include <ATen/ops/le_native.h>
#include <ATen/ops/le_ops.h>
#include <ATen/ops/le_native.h>
#include <ATen/ops/le_ops.h>
#include <ATen/ops/le_native.h>
#include <ATen/ops/le_ops.h>
#include <ATen/ops/less_equal_native.h>
#include <ATen/ops/less_equal_ops.h>
#include <ATen/ops/less_equal_native.h>
#include <ATen/ops/less_equal_ops.h>
#include <ATen/ops/less_equal_native.h>
#include <ATen/ops/less_equal_ops.h>
#include <ATen/ops/less_equal_native.h>
#include <ATen/ops/less_equal_ops.h>
#include <ATen/ops/less_equal_native.h>
#include <ATen/ops/less_equal_ops.h>
#include <ATen/ops/less_equal_native.h>
#include <ATen/ops/less_equal_ops.h>
#include <ATen/ops/gt_native.h>
#include <ATen/ops/gt_ops.h>
#include <ATen/ops/gt_native.h>
#include <ATen/ops/gt_ops.h>
#include <ATen/ops/gt_native.h>
#include <ATen/ops/gt_ops.h>
#include <ATen/ops/gt_native.h>
#include <ATen/ops/gt_ops.h>
#include <ATen/ops/gt_native.h>
#include <ATen/ops/gt_ops.h>
#include <ATen/ops/gt_native.h>
#include <ATen/ops/gt_ops.h>
#include <ATen/ops/greater_native.h>
#include <ATen/ops/greater_ops.h>
#include <ATen/ops/greater_native.h>
#include <ATen/ops/greater_ops.h>
#include <ATen/ops/greater_native.h>
#include <ATen/ops/greater_ops.h>
#include <ATen/ops/greater_native.h>
#include <ATen/ops/greater_ops.h>
#include <ATen/ops/greater_native.h>
#include <ATen/ops/greater_ops.h>
#include <ATen/ops/greater_native.h>
#include <ATen/ops/greater_ops.h>
#include <ATen/ops/lt_native.h>
#include <ATen/ops/lt_ops.h>
#include <ATen/ops/lt_native.h>
#include <ATen/ops/lt_ops.h>
#include <ATen/ops/lt_native.h>
#include <ATen/ops/lt_ops.h>
#include <ATen/ops/lt_native.h>
#include <ATen/ops/lt_ops.h>
#include <ATen/ops/lt_native.h>
#include <ATen/ops/lt_ops.h>
#include <ATen/ops/lt_native.h>
#include <ATen/ops/lt_ops.h>
#include <ATen/ops/less_native.h>
#include <ATen/ops/less_ops.h>
#include <ATen/ops/less_native.h>
#include <ATen/ops/less_ops.h>
#include <ATen/ops/less_native.h>
#include <ATen/ops/less_ops.h>
#include <ATen/ops/less_native.h>
#include <ATen/ops/less_ops.h>
#include <ATen/ops/less_native.h>
#include <ATen/ops/less_ops.h>
#include <ATen/ops/less_native.h>
#include <ATen/ops/less_ops.h>
#include <ATen/ops/take_native.h>
#include <ATen/ops/take_ops.h>
#include <ATen/ops/take_native.h>
#include <ATen/ops/take_ops.h>
#include <ATen/ops/take_along_dim_native.h>
#include <ATen/ops/take_along_dim_ops.h>
#include <ATen/ops/take_along_dim_native.h>
#include <ATen/ops/take_along_dim_ops.h>
#include <ATen/ops/index_select_native.h>
#include <ATen/ops/index_select_ops.h>
#include <ATen/ops/index_select_native.h>
#include <ATen/ops/index_select_ops.h>
#include <ATen/ops/index_select_native.h>
#include <ATen/ops/index_select_ops.h>
#include <ATen/ops/index_select_native.h>
#include <ATen/ops/index_select_ops.h>
#include <ATen/ops/masked_select_native.h>
#include <ATen/ops/masked_select_ops.h>
#include <ATen/ops/masked_select_native.h>
#include <ATen/ops/masked_select_ops.h>
#include <ATen/ops/nonzero_native.h>
#include <ATen/ops/nonzero_ops.h>
#include <ATen/ops/nonzero_native.h>
#include <ATen/ops/nonzero_ops.h>
#include <ATen/ops/nonzero_static_native.h>
#include <ATen/ops/nonzero_static_ops.h>
#include <ATen/ops/nonzero_static_native.h>
#include <ATen/ops/nonzero_static_ops.h>
#include <ATen/ops/gather_native.h>
#include <ATen/ops/gather_ops.h>
#include <ATen/ops/gather_native.h>
#include <ATen/ops/gather_ops.h>
#include <ATen/ops/gather_native.h>
#include <ATen/ops/gather_ops.h>
#include <ATen/ops/gather_native.h>
#include <ATen/ops/gather_ops.h>
#include <ATen/ops/addcmul_native.h>
#include <ATen/ops/addcmul_ops.h>
#include <ATen/ops/addcmul_native.h>
#include <ATen/ops/addcmul_ops.h>
#include <ATen/ops/addcmul_native.h>
#include <ATen/ops/addcmul_ops.h>
#include <ATen/ops/addcdiv_native.h>
#include <ATen/ops/addcdiv_ops.h>
#include <ATen/ops/addcdiv_native.h>
#include <ATen/ops/addcdiv_ops.h>
#include <ATen/ops/addcdiv_native.h>
#include <ATen/ops/addcdiv_ops.h>
#include <ATen/ops/triangular_solve_native.h>
#include <ATen/ops/triangular_solve_ops.h>
#include <ATen/ops/triangular_solve_native.h>
#include <ATen/ops/triangular_solve_ops.h>
#include <ATen/ops/linalg_solve_triangular_native.h>
#include <ATen/ops/linalg_solve_triangular_ops.h>
#include <ATen/ops/linalg_solve_triangular_native.h>
#include <ATen/ops/linalg_solve_triangular_ops.h>
#include <ATen/ops/svd_native.h>
#include <ATen/ops/svd_ops.h>
#include <ATen/ops/svd_native.h>
#include <ATen/ops/svd_ops.h>
#include <ATen/ops/cholesky_native.h>
#include <ATen/ops/cholesky_ops.h>
#include <ATen/ops/cholesky_native.h>
#include <ATen/ops/cholesky_ops.h>
#include <ATen/ops/cholesky_solve_native.h>
#include <ATen/ops/cholesky_solve_ops.h>
#include <ATen/ops/cholesky_solve_native.h>
#include <ATen/ops/cholesky_solve_ops.h>
#include <ATen/ops/_cholesky_solve_helper_native.h>
#include <ATen/ops/_cholesky_solve_helper_ops.h>
#include <ATen/ops/_cholesky_solve_helper_native.h>
#include <ATen/ops/_cholesky_solve_helper_ops.h>
#include <ATen/ops/cholesky_inverse_native.h>
#include <ATen/ops/cholesky_inverse_ops.h>
#include <ATen/ops/cholesky_inverse_native.h>
#include <ATen/ops/cholesky_inverse_ops.h>
#include <ATen/ops/qr_native.h>
#include <ATen/ops/qr_ops.h>
#include <ATen/ops/qr_native.h>
#include <ATen/ops/qr_ops.h>
#include <ATen/ops/geqrf_native.h>
#include <ATen/ops/geqrf_ops.h>
#include <ATen/ops/geqrf_native.h>
#include <ATen/ops/geqrf_ops.h>
#include <ATen/ops/orgqr_native.h>
#include <ATen/ops/orgqr_ops.h>
#include <ATen/ops/orgqr_native.h>
#include <ATen/ops/orgqr_ops.h>
#include <ATen/ops/ormqr_native.h>
#include <ATen/ops/ormqr_ops.h>
#include <ATen/ops/ormqr_native.h>
#include <ATen/ops/ormqr_ops.h>
#include <ATen/ops/lu_solve_native.h>
#include <ATen/ops/lu_solve_ops.h>
#include <ATen/ops/lu_solve_native.h>
#include <ATen/ops/lu_solve_ops.h>
#include <ATen/ops/lu_unpack_native.h>
#include <ATen/ops/lu_unpack_ops.h>
#include <ATen/ops/lu_unpack_native.h>
#include <ATen/ops/lu_unpack_ops.h>
#include <ATen/ops/multinomial_native.h>
#include <ATen/ops/multinomial_ops.h>
#include <ATen/ops/multinomial_native.h>
#include <ATen/ops/multinomial_ops.h>
#include <ATen/ops/lgamma_native.h>
#include <ATen/ops/lgamma_ops.h>
#include <ATen/ops/lgamma_native.h>
#include <ATen/ops/lgamma_ops.h>
#include <ATen/ops/lgamma_native.h>
#include <ATen/ops/lgamma_ops.h>
#include <ATen/ops/polygamma_native.h>
#include <ATen/ops/polygamma_ops.h>
#include <ATen/ops/polygamma_native.h>
#include <ATen/ops/polygamma_ops.h>
#include <ATen/ops/erfinv_native.h>
#include <ATen/ops/erfinv_ops.h>
#include <ATen/ops/erfinv_native.h>
#include <ATen/ops/erfinv_ops.h>
#include <ATen/ops/erfinv_native.h>
#include <ATen/ops/erfinv_ops.h>
#include <ATen/ops/i0_native.h>
#include <ATen/ops/i0_ops.h>
#include <ATen/ops/i0_native.h>
#include <ATen/ops/i0_ops.h>
#include <ATen/ops/i0_native.h>
#include <ATen/ops/i0_ops.h>
#include <ATen/ops/sign_native.h>
#include <ATen/ops/sign_ops.h>
#include <ATen/ops/sign_native.h>
#include <ATen/ops/sign_ops.h>
#include <ATen/ops/sign_native.h>
#include <ATen/ops/sign_ops.h>
#include <ATen/ops/signbit_native.h>
#include <ATen/ops/signbit_ops.h>
#include <ATen/ops/signbit_native.h>
#include <ATen/ops/signbit_ops.h>
#include <ATen/ops/dist_native.h>
#include <ATen/ops/dist_ops.h>
#include <ATen/ops/dist_native.h>
#include <ATen/ops/dist_ops.h>
#include <ATen/ops/atan2_native.h>
#include <ATen/ops/atan2_ops.h>
#include <ATen/ops/atan2_native.h>
#include <ATen/ops/atan2_ops.h>
#include <ATen/ops/atan2_native.h>
#include <ATen/ops/atan2_ops.h>
#include <ATen/ops/arctan2_native.h>
#include <ATen/ops/arctan2_ops.h>
#include <ATen/ops/arctan2_native.h>
#include <ATen/ops/arctan2_ops.h>
#include <ATen/ops/arctan2_native.h>
#include <ATen/ops/arctan2_ops.h>
#include <ATen/ops/histc_native.h>
#include <ATen/ops/histc_ops.h>
#include <ATen/ops/histc_native.h>
#include <ATen/ops/histc_ops.h>
#include <ATen/ops/histogram_native.h>
#include <ATen/ops/histogram_ops.h>
#include <ATen/ops/histogram_native.h>
#include <ATen/ops/histogram_ops.h>
#include <ATen/ops/histogram_native.h>
#include <ATen/ops/histogram_ops.h>
#include <ATen/ops/histogram_native.h>
#include <ATen/ops/histogram_ops.h>
#include <ATen/ops/_histogramdd_bin_edges_native.h>
#include <ATen/ops/_histogramdd_bin_edges_ops.h>
#include <ATen/ops/_histogramdd_bin_edges_native.h>
#include <ATen/ops/_histogramdd_bin_edges_ops.h>
#include <ATen/ops/_histogramdd_from_bin_cts_native.h>
#include <ATen/ops/_histogramdd_from_bin_cts_ops.h>
#include <ATen/ops/_histogramdd_from_bin_cts_native.h>
#include <ATen/ops/_histogramdd_from_bin_cts_ops.h>
#include <ATen/ops/_histogramdd_from_bin_tensors_native.h>
#include <ATen/ops/_histogramdd_from_bin_tensors_ops.h>
#include <ATen/ops/_histogramdd_from_bin_tensors_native.h>
#include <ATen/ops/_histogramdd_from_bin_tensors_ops.h>
#include <ATen/ops/fmod_native.h>
#include <ATen/ops/fmod_ops.h>
#include <ATen/ops/fmod_native.h>
#include <ATen/ops/fmod_ops.h>
#include <ATen/ops/fmod_native.h>
#include <ATen/ops/fmod_ops.h>
#include <ATen/ops/fmod_native.h>
#include <ATen/ops/fmod_ops.h>
#include <ATen/ops/fmod_native.h>
#include <ATen/ops/fmod_ops.h>
#include <ATen/ops/fmod_native.h>
#include <ATen/ops/fmod_ops.h>
#include <ATen/ops/hypot_native.h>
#include <ATen/ops/hypot_ops.h>
#include <ATen/ops/hypot_native.h>
#include <ATen/ops/hypot_ops.h>
#include <ATen/ops/hypot_native.h>
#include <ATen/ops/hypot_ops.h>
#include <ATen/ops/igamma_native.h>
#include <ATen/ops/igamma_ops.h>
#include <ATen/ops/igamma_native.h>
#include <ATen/ops/igamma_ops.h>
#include <ATen/ops/igamma_native.h>
#include <ATen/ops/igamma_ops.h>
#include <ATen/ops/igammac_native.h>
#include <ATen/ops/igammac_ops.h>
#include <ATen/ops/igammac_native.h>
#include <ATen/ops/igammac_ops.h>
#include <ATen/ops/igammac_native.h>
#include <ATen/ops/igammac_ops.h>
#include <ATen/ops/nextafter_native.h>
#include <ATen/ops/nextafter_ops.h>
#include <ATen/ops/nextafter_native.h>
#include <ATen/ops/nextafter_ops.h>
#include <ATen/ops/nextafter_native.h>
#include <ATen/ops/nextafter_ops.h>
#include <ATen/ops/remainder_native.h>
#include <ATen/ops/remainder_ops.h>
#include <ATen/ops/remainder_native.h>
#include <ATen/ops/remainder_ops.h>
#include <ATen/ops/remainder_native.h>
#include <ATen/ops/remainder_ops.h>
#include <ATen/ops/remainder_native.h>
#include <ATen/ops/remainder_ops.h>
#include <ATen/ops/remainder_native.h>
#include <ATen/ops/remainder_ops.h>
#include <ATen/ops/remainder_native.h>
#include <ATen/ops/remainder_ops.h>
#include <ATen/ops/remainder_native.h>
#include <ATen/ops/remainder_ops.h>
#include <ATen/ops/remainder_native.h>
#include <ATen/ops/remainder_ops.h>
#include <ATen/ops/min_native.h>
#include <ATen/ops/min_ops.h>
#include <ATen/ops/min_native.h>
#include <ATen/ops/min_ops.h>
#include <ATen/ops/fmin_native.h>
#include <ATen/ops/fmin_ops.h>
#include <ATen/ops/fmin_native.h>
#include <ATen/ops/fmin_ops.h>
#include <ATen/ops/max_native.h>
#include <ATen/ops/max_ops.h>
#include <ATen/ops/max_native.h>
#include <ATen/ops/max_ops.h>
#include <ATen/ops/fmax_native.h>
#include <ATen/ops/fmax_ops.h>
#include <ATen/ops/fmax_native.h>
#include <ATen/ops/fmax_ops.h>
#include <ATen/ops/maximum_native.h>
#include <ATen/ops/maximum_ops.h>
#include <ATen/ops/maximum_native.h>
#include <ATen/ops/maximum_ops.h>
#include <ATen/ops/max_native.h>
#include <ATen/ops/max_ops.h>
#include <ATen/ops/max_native.h>
#include <ATen/ops/max_ops.h>
#include <ATen/ops/minimum_native.h>
#include <ATen/ops/minimum_ops.h>
#include <ATen/ops/minimum_native.h>
#include <ATen/ops/minimum_ops.h>
#include <ATen/ops/min_native.h>
#include <ATen/ops/min_ops.h>
#include <ATen/ops/min_native.h>
#include <ATen/ops/min_ops.h>
#include <ATen/ops/quantile_native.h>
#include <ATen/ops/quantile_ops.h>
#include <ATen/ops/quantile_native.h>
#include <ATen/ops/quantile_ops.h>
#include <ATen/ops/quantile_native.h>
#include <ATen/ops/quantile_ops.h>
#include <ATen/ops/quantile_native.h>
#include <ATen/ops/quantile_ops.h>
#include <ATen/ops/nanquantile_native.h>
#include <ATen/ops/nanquantile_ops.h>
#include <ATen/ops/nanquantile_native.h>
#include <ATen/ops/nanquantile_ops.h>
#include <ATen/ops/nanquantile_native.h>
#include <ATen/ops/nanquantile_ops.h>
#include <ATen/ops/nanquantile_native.h>
#include <ATen/ops/nanquantile_ops.h>
#include <ATen/ops/sort_native.h>
#include <ATen/ops/sort_ops.h>
#include <ATen/ops/sort_native.h>
#include <ATen/ops/sort_ops.h>
#include <ATen/ops/sort_native.h>
#include <ATen/ops/sort_ops.h>
#include <ATen/ops/sort_native.h>
#include <ATen/ops/sort_ops.h>
#include <ATen/ops/sort_native.h>
#include <ATen/ops/sort_ops.h>
#include <ATen/ops/sort_native.h>
#include <ATen/ops/sort_ops.h>
#include <ATen/ops/sort_native.h>
#include <ATen/ops/sort_ops.h>
#include <ATen/ops/sort_native.h>
#include <ATen/ops/sort_ops.h>
#include <ATen/ops/msort_native.h>
#include <ATen/ops/msort_ops.h>
#include <ATen/ops/msort_native.h>
#include <ATen/ops/msort_ops.h>
#include <ATen/ops/argsort_native.h>
#include <ATen/ops/argsort_ops.h>
#include <ATen/ops/argsort_native.h>
#include <ATen/ops/argsort_ops.h>
#include <ATen/ops/topk_native.h>
#include <ATen/ops/topk_ops.h>
#include <ATen/ops/topk_native.h>
#include <ATen/ops/topk_ops.h>
#include <ATen/ops/all_native.h>
#include <ATen/ops/all_ops.h>
#include <ATen/ops/all_native.h>
#include <ATen/ops/all_ops.h>
#include <ATen/ops/any_native.h>
#include <ATen/ops/any_ops.h>
#include <ATen/ops/any_native.h>
#include <ATen/ops/any_ops.h>
#include <ATen/ops/renorm_native.h>
#include <ATen/ops/renorm_ops.h>
#include <ATen/ops/renorm_native.h>
#include <ATen/ops/renorm_ops.h>
#include <ATen/ops/renorm_native.h>
#include <ATen/ops/renorm_ops.h>
#include <ATen/ops/unfold_backward_native.h>
#include <ATen/ops/unfold_backward_ops.h>
#include <ATen/ops/unfold_backward_native.h>
#include <ATen/ops/unfold_backward_ops.h>
#include <ATen/ops/pow_native.h>
#include <ATen/ops/pow_ops.h>
#include <ATen/ops/pow_native.h>
#include <ATen/ops/pow_ops.h>
#include <ATen/ops/pow_native.h>
#include <ATen/ops/pow_ops.h>
#include <ATen/ops/pow_native.h>
#include <ATen/ops/pow_ops.h>
#include <ATen/ops/pow_native.h>
#include <ATen/ops/pow_ops.h>
#include <ATen/ops/pow_native.h>
#include <ATen/ops/pow_ops.h>
#include <ATen/ops/pow_native.h>
#include <ATen/ops/pow_ops.h>
#include <ATen/ops/pow_native.h>
#include <ATen/ops/pow_ops.h>
#include <ATen/ops/float_power_native.h>
#include <ATen/ops/float_power_ops.h>
#include <ATen/ops/float_power_native.h>
#include <ATen/ops/float_power_ops.h>
#include <ATen/ops/float_power_native.h>
#include <ATen/ops/float_power_ops.h>
#include <ATen/ops/float_power_native.h>
#include <ATen/ops/float_power_ops.h>
#include <ATen/ops/float_power_native.h>
#include <ATen/ops/float_power_ops.h>
#include <ATen/ops/float_power_native.h>
#include <ATen/ops/float_power_ops.h>
#include <ATen/ops/float_power_native.h>
#include <ATen/ops/float_power_ops.h>
#include <ATen/ops/float_power_native.h>
#include <ATen/ops/float_power_ops.h>
#include <ATen/ops/normal_native.h>
#include <ATen/ops/normal_ops.h>
#include <ATen/ops/normal_native.h>
#include <ATen/ops/normal_ops.h>
#include <ATen/ops/normal_native.h>
#include <ATen/ops/normal_ops.h>
#include <ATen/ops/normal_native.h>
#include <ATen/ops/normal_ops.h>
#include <ATen/ops/normal_native.h>
#include <ATen/ops/normal_ops.h>
#include <ATen/ops/normal_native.h>
#include <ATen/ops/normal_ops.h>
#include <ATen/ops/normal_native.h>
#include <ATen/ops/normal_ops.h>
#include <ATen/ops/normal_native.h>
#include <ATen/ops/normal_ops.h>
#include <ATen/ops/normal_native.h>
#include <ATen/ops/normal_ops.h>
#include <ATen/ops/normal_native.h>
#include <ATen/ops/normal_ops.h>
#include <ATen/ops/normal_native.h>
#include <ATen/ops/normal_ops.h>
#include <ATen/ops/_amp_foreach_non_finite_check_and_unscale_native.h>
#include <ATen/ops/_amp_foreach_non_finite_check_and_unscale_ops.h>
#include <ATen/ops/_amp_foreach_non_finite_check_and_unscale_native.h>
#include <ATen/ops/_amp_foreach_non_finite_check_and_unscale_ops.h>
#include <ATen/ops/_amp_foreach_non_finite_check_and_unscale_native.h>
#include <ATen/ops/_amp_foreach_non_finite_check_and_unscale_ops.h>
#include <ATen/ops/_amp_update_scale_native.h>
#include <ATen/ops/_amp_update_scale_ops.h>
#include <ATen/ops/_amp_update_scale_native.h>
#include <ATen/ops/_amp_update_scale_ops.h>
#include <ATen/ops/_amp_update_scale_native.h>
#include <ATen/ops/_amp_update_scale_ops.h>
#include <ATen/ops/_foreach_add_native.h>
#include <ATen/ops/_foreach_add_ops.h>
#include <ATen/ops/_foreach_add_native.h>
#include <ATen/ops/_foreach_add_ops.h>
#include <ATen/ops/_foreach_add_native.h>
#include <ATen/ops/_foreach_add_ops.h>
#include <ATen/ops/_foreach_add_native.h>
#include <ATen/ops/_foreach_add_ops.h>
#include <ATen/ops/_foreach_add_native.h>
#include <ATen/ops/_foreach_add_ops.h>
#include <ATen/ops/_foreach_add_native.h>
#include <ATen/ops/_foreach_add_ops.h>
#include <ATen/ops/_foreach_add_native.h>
#include <ATen/ops/_foreach_add_ops.h>
#include <ATen/ops/_foreach_add_native.h>
#include <ATen/ops/_foreach_add_ops.h>
#include <ATen/ops/_foreach_add_native.h>
#include <ATen/ops/_foreach_add_ops.h>
#include <ATen/ops/_foreach_add_native.h>
#include <ATen/ops/_foreach_add_ops.h>
#include <ATen/ops/_foreach_add_native.h>
#include <ATen/ops/_foreach_add_ops.h>
#include <ATen/ops/_foreach_add_native.h>
#include <ATen/ops/_foreach_add_ops.h>
#include <ATen/ops/_foreach_sub_native.h>
#include <ATen/ops/_foreach_sub_ops.h>
#include <ATen/ops/_foreach_sub_native.h>
#include <ATen/ops/_foreach_sub_ops.h>
#include <ATen/ops/_foreach_sub_native.h>
#include <ATen/ops/_foreach_sub_ops.h>
#include <ATen/ops/_foreach_sub_native.h>
#include <ATen/ops/_foreach_sub_ops.h>
#include <ATen/ops/_foreach_sub_native.h>
#include <ATen/ops/_foreach_sub_ops.h>
#include <ATen/ops/_foreach_sub_native.h>
#include <ATen/ops/_foreach_sub_ops.h>
#include <ATen/ops/_foreach_sub_native.h>
#include <ATen/ops/_foreach_sub_ops.h>
#include <ATen/ops/_foreach_sub_native.h>
#include <ATen/ops/_foreach_sub_ops.h>
#include <ATen/ops/_foreach_sub_native.h>
#include <ATen/ops/_foreach_sub_ops.h>
#include <ATen/ops/_foreach_mul_native.h>
#include <ATen/ops/_foreach_mul_ops.h>
#include <ATen/ops/_foreach_mul_native.h>
#include <ATen/ops/_foreach_mul_ops.h>
#include <ATen/ops/_foreach_mul_native.h>
#include <ATen/ops/_foreach_mul_ops.h>
#include <ATen/ops/_foreach_mul_native.h>
#include <ATen/ops/_foreach_mul_ops.h>
#include <ATen/ops/_foreach_mul_native.h>
#include <ATen/ops/_foreach_mul_ops.h>
#include <ATen/ops/_foreach_mul_native.h>
#include <ATen/ops/_foreach_mul_ops.h>
#include <ATen/ops/_foreach_mul_native.h>
#include <ATen/ops/_foreach_mul_ops.h>
#include <ATen/ops/_foreach_mul_native.h>
#include <ATen/ops/_foreach_mul_ops.h>
#include <ATen/ops/_foreach_mul_native.h>
#include <ATen/ops/_foreach_mul_ops.h>
#include <ATen/ops/_foreach_mul_native.h>
#include <ATen/ops/_foreach_mul_ops.h>
#include <ATen/ops/_foreach_mul_native.h>
#include <ATen/ops/_foreach_mul_ops.h>
#include <ATen/ops/_foreach_mul_native.h>
#include <ATen/ops/_foreach_mul_ops.h>
#include <ATen/ops/_foreach_div_native.h>
#include <ATen/ops/_foreach_div_ops.h>
#include <ATen/ops/_foreach_div_native.h>
#include <ATen/ops/_foreach_div_ops.h>
#include <ATen/ops/_foreach_div_native.h>
#include <ATen/ops/_foreach_div_ops.h>
#include <ATen/ops/_foreach_div_native.h>
#include <ATen/ops/_foreach_div_ops.h>
#include <ATen/ops/_foreach_div_native.h>
#include <ATen/ops/_foreach_div_ops.h>
#include <ATen/ops/_foreach_div_native.h>
#include <ATen/ops/_foreach_div_ops.h>
#include <ATen/ops/_foreach_div_native.h>
#include <ATen/ops/_foreach_div_ops.h>
#include <ATen/ops/_foreach_div_native.h>
#include <ATen/ops/_foreach_div_ops.h>
#include <ATen/ops/_foreach_div_native.h>
#include <ATen/ops/_foreach_div_ops.h>
#include <ATen/ops/_foreach_div_native.h>
#include <ATen/ops/_foreach_div_ops.h>
#include <ATen/ops/_foreach_div_native.h>
#include <ATen/ops/_foreach_div_ops.h>
#include <ATen/ops/_foreach_div_native.h>
#include <ATen/ops/_foreach_div_ops.h>
#include <ATen/ops/_foreach_clamp_max_native.h>
#include <ATen/ops/_foreach_clamp_max_ops.h>
#include <ATen/ops/_foreach_clamp_max_native.h>
#include <ATen/ops/_foreach_clamp_max_ops.h>
#include <ATen/ops/_foreach_clamp_max_native.h>
#include <ATen/ops/_foreach_clamp_max_ops.h>
#include <ATen/ops/_foreach_clamp_max_native.h>
#include <ATen/ops/_foreach_clamp_max_ops.h>
#include <ATen/ops/_foreach_clamp_max_native.h>
#include <ATen/ops/_foreach_clamp_max_ops.h>
#include <ATen/ops/_foreach_clamp_max_native.h>
#include <ATen/ops/_foreach_clamp_max_ops.h>
#include <ATen/ops/_foreach_clamp_max_native.h>
#include <ATen/ops/_foreach_clamp_max_ops.h>
#include <ATen/ops/_foreach_clamp_max_native.h>
#include <ATen/ops/_foreach_clamp_max_ops.h>
#include <ATen/ops/_foreach_clamp_max_native.h>
#include <ATen/ops/_foreach_clamp_max_ops.h>
#include <ATen/ops/_foreach_clamp_min_native.h>
#include <ATen/ops/_foreach_clamp_min_ops.h>
#include <ATen/ops/_foreach_clamp_min_native.h>
#include <ATen/ops/_foreach_clamp_min_ops.h>
#include <ATen/ops/_foreach_clamp_min_native.h>
#include <ATen/ops/_foreach_clamp_min_ops.h>
#include <ATen/ops/_foreach_clamp_min_native.h>
#include <ATen/ops/_foreach_clamp_min_ops.h>
#include <ATen/ops/_foreach_clamp_min_native.h>
#include <ATen/ops/_foreach_clamp_min_ops.h>
#include <ATen/ops/_foreach_clamp_min_native.h>
#include <ATen/ops/_foreach_clamp_min_ops.h>
#include <ATen/ops/_foreach_clamp_min_native.h>
#include <ATen/ops/_foreach_clamp_min_ops.h>
#include <ATen/ops/_foreach_clamp_min_native.h>
#include <ATen/ops/_foreach_clamp_min_ops.h>
#include <ATen/ops/_foreach_clamp_min_native.h>
#include <ATen/ops/_foreach_clamp_min_ops.h>
#include <ATen/ops/_foreach_maximum_native.h>
#include <ATen/ops/_foreach_maximum_ops.h>
#include <ATen/ops/_foreach_maximum_native.h>
#include <ATen/ops/_foreach_maximum_ops.h>
#include <ATen/ops/_foreach_maximum_native.h>
#include <ATen/ops/_foreach_maximum_ops.h>
#include <ATen/ops/_foreach_maximum_native.h>
#include <ATen/ops/_foreach_maximum_ops.h>
#include <ATen/ops/_foreach_maximum_native.h>
#include <ATen/ops/_foreach_maximum_ops.h>
#include <ATen/ops/_foreach_maximum_native.h>
#include <ATen/ops/_foreach_maximum_ops.h>
#include <ATen/ops/_foreach_maximum_native.h>
#include <ATen/ops/_foreach_maximum_ops.h>
#include <ATen/ops/_foreach_maximum_native.h>
#include <ATen/ops/_foreach_maximum_ops.h>
#include <ATen/ops/_foreach_maximum_native.h>
#include <ATen/ops/_foreach_maximum_ops.h>
#include <ATen/ops/_foreach_minimum_native.h>
#include <ATen/ops/_foreach_minimum_ops.h>
#include <ATen/ops/_foreach_minimum_native.h>
#include <ATen/ops/_foreach_minimum_ops.h>
#include <ATen/ops/_foreach_minimum_native.h>
#include <ATen/ops/_foreach_minimum_ops.h>
#include <ATen/ops/_foreach_minimum_native.h>
#include <ATen/ops/_foreach_minimum_ops.h>
#include <ATen/ops/_foreach_minimum_native.h>
#include <ATen/ops/_foreach_minimum_ops.h>
#include <ATen/ops/_foreach_minimum_native.h>
#include <ATen/ops/_foreach_minimum_ops.h>
#include <ATen/ops/_foreach_minimum_native.h>
#include <ATen/ops/_foreach_minimum_ops.h>
#include <ATen/ops/_foreach_minimum_native.h>
#include <ATen/ops/_foreach_minimum_ops.h>
#include <ATen/ops/_foreach_minimum_native.h>
#include <ATen/ops/_foreach_minimum_ops.h>
#include <ATen/ops/_foreach_addcdiv_native.h>
#include <ATen/ops/_foreach_addcdiv_ops.h>
#include <ATen/ops/_foreach_addcdiv_native.h>
#include <ATen/ops/_foreach_addcdiv_ops.h>
#include <ATen/ops/_foreach_addcdiv_native.h>
#include <ATen/ops/_foreach_addcdiv_ops.h>
#include <ATen/ops/_foreach_addcdiv_native.h>
#include <ATen/ops/_foreach_addcdiv_ops.h>
#include <ATen/ops/_foreach_addcdiv_native.h>
#include <ATen/ops/_foreach_addcdiv_ops.h>
#include <ATen/ops/_foreach_addcdiv_native.h>
#include <ATen/ops/_foreach_addcdiv_ops.h>
#include <ATen/ops/_foreach_addcdiv_native.h>
#include <ATen/ops/_foreach_addcdiv_ops.h>
#include <ATen/ops/_foreach_addcdiv_native.h>
#include <ATen/ops/_foreach_addcdiv_ops.h>
#include <ATen/ops/_foreach_addcdiv_native.h>
#include <ATen/ops/_foreach_addcdiv_ops.h>
#include <ATen/ops/_foreach_addcmul_native.h>
#include <ATen/ops/_foreach_addcmul_ops.h>
#include <ATen/ops/_foreach_addcmul_native.h>
#include <ATen/ops/_foreach_addcmul_ops.h>
#include <ATen/ops/_foreach_addcmul_native.h>
#include <ATen/ops/_foreach_addcmul_ops.h>
#include <ATen/ops/_foreach_addcmul_native.h>
#include <ATen/ops/_foreach_addcmul_ops.h>
#include <ATen/ops/_foreach_addcmul_native.h>
#include <ATen/ops/_foreach_addcmul_ops.h>
#include <ATen/ops/_foreach_addcmul_native.h>
#include <ATen/ops/_foreach_addcmul_ops.h>
#include <ATen/ops/_foreach_addcmul_native.h>
#include <ATen/ops/_foreach_addcmul_ops.h>
#include <ATen/ops/_foreach_addcmul_native.h>
#include <ATen/ops/_foreach_addcmul_ops.h>
#include <ATen/ops/_foreach_addcmul_native.h>
#include <ATen/ops/_foreach_addcmul_ops.h>
#include <ATen/ops/_foreach_abs_native.h>
#include <ATen/ops/_foreach_abs_ops.h>
#include <ATen/ops/_foreach_abs_native.h>
#include <ATen/ops/_foreach_abs_ops.h>
#include <ATen/ops/_foreach_abs_native.h>
#include <ATen/ops/_foreach_abs_ops.h>
#include <ATen/ops/_foreach_acos_native.h>
#include <ATen/ops/_foreach_acos_ops.h>
#include <ATen/ops/_foreach_acos_native.h>
#include <ATen/ops/_foreach_acos_ops.h>
#include <ATen/ops/_foreach_acos_native.h>
#include <ATen/ops/_foreach_acos_ops.h>
#include <ATen/ops/_foreach_asin_native.h>
#include <ATen/ops/_foreach_asin_ops.h>
#include <ATen/ops/_foreach_asin_native.h>
#include <ATen/ops/_foreach_asin_ops.h>
#include <ATen/ops/_foreach_asin_native.h>
#include <ATen/ops/_foreach_asin_ops.h>
#include <ATen/ops/_foreach_atan_native.h>
#include <ATen/ops/_foreach_atan_ops.h>
#include <ATen/ops/_foreach_atan_native.h>
#include <ATen/ops/_foreach_atan_ops.h>
#include <ATen/ops/_foreach_atan_native.h>
#include <ATen/ops/_foreach_atan_ops.h>
#include <ATen/ops/_foreach_ceil_native.h>
#include <ATen/ops/_foreach_ceil_ops.h>
#include <ATen/ops/_foreach_ceil_native.h>
#include <ATen/ops/_foreach_ceil_ops.h>
#include <ATen/ops/_foreach_ceil_native.h>
#include <ATen/ops/_foreach_ceil_ops.h>
#include <ATen/ops/_foreach_cos_native.h>
#include <ATen/ops/_foreach_cos_ops.h>
#include <ATen/ops/_foreach_cos_native.h>
#include <ATen/ops/_foreach_cos_ops.h>
#include <ATen/ops/_foreach_cos_native.h>
#include <ATen/ops/_foreach_cos_ops.h>
#include <ATen/ops/_foreach_cosh_native.h>
#include <ATen/ops/_foreach_cosh_ops.h>
#include <ATen/ops/_foreach_cosh_native.h>
#include <ATen/ops/_foreach_cosh_ops.h>
#include <ATen/ops/_foreach_cosh_native.h>
#include <ATen/ops/_foreach_cosh_ops.h>
#include <ATen/ops/_foreach_erf_native.h>
#include <ATen/ops/_foreach_erf_ops.h>
#include <ATen/ops/_foreach_erf_native.h>
#include <ATen/ops/_foreach_erf_ops.h>
#include <ATen/ops/_foreach_erf_native.h>
#include <ATen/ops/_foreach_erf_ops.h>
#include <ATen/ops/_foreach_erfc_native.h>
#include <ATen/ops/_foreach_erfc_ops.h>
#include <ATen/ops/_foreach_erfc_native.h>
#include <ATen/ops/_foreach_erfc_ops.h>
#include <ATen/ops/_foreach_erfc_native.h>
#include <ATen/ops/_foreach_erfc_ops.h>
#include <ATen/ops/_foreach_exp_native.h>
#include <ATen/ops/_foreach_exp_ops.h>
#include <ATen/ops/_foreach_exp_native.h>
#include <ATen/ops/_foreach_exp_ops.h>
#include <ATen/ops/_foreach_exp_native.h>
#include <ATen/ops/_foreach_exp_ops.h>
#include <ATen/ops/_foreach_expm1_native.h>
#include <ATen/ops/_foreach_expm1_ops.h>
#include <ATen/ops/_foreach_expm1_native.h>
#include <ATen/ops/_foreach_expm1_ops.h>
#include <ATen/ops/_foreach_expm1_native.h>
#include <ATen/ops/_foreach_expm1_ops.h>
#include <ATen/ops/_foreach_floor_native.h>
#include <ATen/ops/_foreach_floor_ops.h>
#include <ATen/ops/_foreach_floor_native.h>
#include <ATen/ops/_foreach_floor_ops.h>
#include <ATen/ops/_foreach_floor_native.h>
#include <ATen/ops/_foreach_floor_ops.h>
#include <ATen/ops/_foreach_frac_native.h>
#include <ATen/ops/_foreach_frac_ops.h>
#include <ATen/ops/_foreach_frac_native.h>
#include <ATen/ops/_foreach_frac_ops.h>
#include <ATen/ops/_foreach_frac_native.h>
#include <ATen/ops/_foreach_frac_ops.h>
#include <ATen/ops/_foreach_lerp_native.h>
#include <ATen/ops/_foreach_lerp_ops.h>
#include <ATen/ops/_foreach_lerp_native.h>
#include <ATen/ops/_foreach_lerp_ops.h>
#include <ATen/ops/_foreach_lerp_native.h>
#include <ATen/ops/_foreach_lerp_ops.h>
#include <ATen/ops/_foreach_lerp_native.h>
#include <ATen/ops/_foreach_lerp_ops.h>
#include <ATen/ops/_foreach_lerp_native.h>
#include <ATen/ops/_foreach_lerp_ops.h>
#include <ATen/ops/_foreach_lerp_native.h>
#include <ATen/ops/_foreach_lerp_ops.h>
#include <ATen/ops/_foreach_lerp_native.h>
#include <ATen/ops/_foreach_lerp_ops.h>
#include <ATen/ops/_foreach_lerp_native.h>
#include <ATen/ops/_foreach_lerp_ops.h>
#include <ATen/ops/_foreach_lerp_native.h>
#include <ATen/ops/_foreach_lerp_ops.h>
#include <ATen/ops/_foreach_lgamma_native.h>
#include <ATen/ops/_foreach_lgamma_ops.h>
#include <ATen/ops/_foreach_lgamma_native.h>
#include <ATen/ops/_foreach_lgamma_ops.h>
#include <ATen/ops/_foreach_lgamma_native.h>
#include <ATen/ops/_foreach_lgamma_ops.h>
#include <ATen/ops/_foreach_log_native.h>
#include <ATen/ops/_foreach_log_ops.h>
#include <ATen/ops/_foreach_log_native.h>
#include <ATen/ops/_foreach_log_ops.h>
#include <ATen/ops/_foreach_log_native.h>
#include <ATen/ops/_foreach_log_ops.h>
#include <ATen/ops/_foreach_log10_native.h>
#include <ATen/ops/_foreach_log10_ops.h>
#include <ATen/ops/_foreach_log10_native.h>
#include <ATen/ops/_foreach_log10_ops.h>
#include <ATen/ops/_foreach_log10_native.h>
#include <ATen/ops/_foreach_log10_ops.h>
#include <ATen/ops/_foreach_log1p_native.h>
#include <ATen/ops/_foreach_log1p_ops.h>
#include <ATen/ops/_foreach_log1p_native.h>
#include <ATen/ops/_foreach_log1p_ops.h>
#include <ATen/ops/_foreach_log1p_native.h>
#include <ATen/ops/_foreach_log1p_ops.h>
#include <ATen/ops/_foreach_log2_native.h>
#include <ATen/ops/_foreach_log2_ops.h>
#include <ATen/ops/_foreach_log2_native.h>
#include <ATen/ops/_foreach_log2_ops.h>
#include <ATen/ops/_foreach_log2_native.h>
#include <ATen/ops/_foreach_log2_ops.h>
#include <ATen/ops/_foreach_max_native.h>
#include <ATen/ops/_foreach_max_ops.h>
#include <ATen/ops/_foreach_max_native.h>
#include <ATen/ops/_foreach_max_ops.h>
#include <ATen/ops/_foreach_neg_native.h>
#include <ATen/ops/_foreach_neg_ops.h>
#include <ATen/ops/_foreach_neg_native.h>
#include <ATen/ops/_foreach_neg_ops.h>
#include <ATen/ops/_foreach_neg_native.h>
#include <ATen/ops/_foreach_neg_ops.h>
#include <ATen/ops/_foreach_norm_native.h>
#include <ATen/ops/_foreach_norm_ops.h>
#include <ATen/ops/_foreach_norm_native.h>
#include <ATen/ops/_foreach_norm_ops.h>
#include <ATen/ops/_foreach_pow_native.h>
#include <ATen/ops/_foreach_pow_ops.h>
#include <ATen/ops/_foreach_pow_native.h>
#include <ATen/ops/_foreach_pow_ops.h>
#include <ATen/ops/_foreach_pow_native.h>
#include <ATen/ops/_foreach_pow_ops.h>
#include <ATen/ops/_foreach_pow_native.h>
#include <ATen/ops/_foreach_pow_ops.h>
#include <ATen/ops/_foreach_pow_native.h>
#include <ATen/ops/_foreach_pow_ops.h>
#include <ATen/ops/_foreach_pow_native.h>
#include <ATen/ops/_foreach_pow_ops.h>
#include <ATen/ops/_foreach_pow_native.h>
#include <ATen/ops/_foreach_pow_ops.h>
#include <ATen/ops/_foreach_pow_native.h>
#include <ATen/ops/_foreach_pow_ops.h>
#include <ATen/ops/_foreach_pow_native.h>
#include <ATen/ops/_foreach_pow_ops.h>
#include <ATen/ops/_foreach_reciprocal_native.h>
#include <ATen/ops/_foreach_reciprocal_ops.h>
#include <ATen/ops/_foreach_reciprocal_native.h>
#include <ATen/ops/_foreach_reciprocal_ops.h>
#include <ATen/ops/_foreach_reciprocal_native.h>
#include <ATen/ops/_foreach_reciprocal_ops.h>
#include <ATen/ops/_foreach_round_native.h>
#include <ATen/ops/_foreach_round_ops.h>
#include <ATen/ops/_foreach_round_native.h>
#include <ATen/ops/_foreach_round_ops.h>
#include <ATen/ops/_foreach_round_native.h>
#include <ATen/ops/_foreach_round_ops.h>
#include <ATen/ops/_foreach_rsqrt_native.h>
#include <ATen/ops/_foreach_rsqrt_ops.h>
#include <ATen/ops/_foreach_rsqrt_native.h>
#include <ATen/ops/_foreach_rsqrt_ops.h>
#include <ATen/ops/_foreach_rsqrt_native.h>
#include <ATen/ops/_foreach_rsqrt_ops.h>
#include <ATen/ops/_foreach_sigmoid_native.h>
#include <ATen/ops/_foreach_sigmoid_ops.h>
#include <ATen/ops/_foreach_sigmoid_native.h>
#include <ATen/ops/_foreach_sigmoid_ops.h>
#include <ATen/ops/_foreach_sigmoid_native.h>
#include <ATen/ops/_foreach_sigmoid_ops.h>
#include <ATen/ops/_foreach_sign_native.h>
#include <ATen/ops/_foreach_sign_ops.h>
#include <ATen/ops/_foreach_sign_native.h>
#include <ATen/ops/_foreach_sign_ops.h>
#include <ATen/ops/_foreach_sign_native.h>
#include <ATen/ops/_foreach_sign_ops.h>
#include <ATen/ops/_foreach_sin_native.h>
#include <ATen/ops/_foreach_sin_ops.h>
#include <ATen/ops/_foreach_sin_native.h>
#include <ATen/ops/_foreach_sin_ops.h>
#include <ATen/ops/_foreach_sin_native.h>
#include <ATen/ops/_foreach_sin_ops.h>
#include <ATen/ops/_foreach_sinh_native.h>
#include <ATen/ops/_foreach_sinh_ops.h>
#include <ATen/ops/_foreach_sinh_native.h>
#include <ATen/ops/_foreach_sinh_ops.h>
#include <ATen/ops/_foreach_sinh_native.h>
#include <ATen/ops/_foreach_sinh_ops.h>
#include <ATen/ops/_foreach_sqrt_native.h>
#include <ATen/ops/_foreach_sqrt_ops.h>
#include <ATen/ops/_foreach_sqrt_native.h>
#include <ATen/ops/_foreach_sqrt_ops.h>
#include <ATen/ops/_foreach_sqrt_native.h>
#include <ATen/ops/_foreach_sqrt_ops.h>
#include <ATen/ops/_foreach_tan_native.h>
#include <ATen/ops/_foreach_tan_ops.h>
#include <ATen/ops/_foreach_tan_native.h>
#include <ATen/ops/_foreach_tan_ops.h>
#include <ATen/ops/_foreach_tan_native.h>
#include <ATen/ops/_foreach_tan_ops.h>
#include <ATen/ops/_foreach_tanh_native.h>
#include <ATen/ops/_foreach_tanh_ops.h>
#include <ATen/ops/_foreach_tanh_native.h>
#include <ATen/ops/_foreach_tanh_ops.h>
#include <ATen/ops/_foreach_tanh_native.h>
#include <ATen/ops/_foreach_tanh_ops.h>
#include <ATen/ops/_foreach_trunc_native.h>
#include <ATen/ops/_foreach_trunc_ops.h>
#include <ATen/ops/_foreach_trunc_native.h>
#include <ATen/ops/_foreach_trunc_ops.h>
#include <ATen/ops/_foreach_trunc_native.h>
#include <ATen/ops/_foreach_trunc_ops.h>
#include <ATen/ops/_foreach_zero_native.h>
#include <ATen/ops/_foreach_zero_ops.h>
#include <ATen/ops/_foreach_zero_native.h>
#include <ATen/ops/_foreach_zero_ops.h>
#include <ATen/ops/_foreach_zero_native.h>
#include <ATen/ops/_foreach_zero_ops.h>
#include <ATen/ops/_foreach_copy_native.h>
#include <ATen/ops/_foreach_copy_ops.h>
#include <ATen/ops/_foreach_copy_native.h>
#include <ATen/ops/_foreach_copy_ops.h>
#include <ATen/ops/_foreach_copy_native.h>
#include <ATen/ops/_foreach_copy_ops.h>
#include <ATen/ops/bucketize_native.h>
#include <ATen/ops/bucketize_ops.h>
#include <ATen/ops/bucketize_native.h>
#include <ATen/ops/bucketize_ops.h>
#include <ATen/ops/bucketize_native.h>
#include <ATen/ops/bucketize_ops.h>
#include <ATen/ops/bucketize_native.h>
#include <ATen/ops/bucketize_ops.h>
#include <ATen/ops/searchsorted_native.h>
#include <ATen/ops/searchsorted_ops.h>
#include <ATen/ops/searchsorted_native.h>
#include <ATen/ops/searchsorted_ops.h>
#include <ATen/ops/searchsorted_native.h>
#include <ATen/ops/searchsorted_ops.h>
#include <ATen/ops/searchsorted_native.h>
#include <ATen/ops/searchsorted_ops.h>
#include <ATen/ops/_convert_indices_from_coo_to_csr_native.h>
#include <ATen/ops/_convert_indices_from_coo_to_csr_ops.h>
#include <ATen/ops/_convert_indices_from_coo_to_csr_native.h>
#include <ATen/ops/_convert_indices_from_coo_to_csr_ops.h>
#include <ATen/ops/_convert_indices_from_csr_to_coo_native.h>
#include <ATen/ops/_convert_indices_from_csr_to_coo_ops.h>
#include <ATen/ops/_convert_indices_from_csr_to_coo_native.h>
#include <ATen/ops/_convert_indices_from_csr_to_coo_ops.h>
#include <ATen/ops/mse_loss_native.h>
#include <ATen/ops/mse_loss_ops.h>
#include <ATen/ops/mse_loss_native.h>
#include <ATen/ops/mse_loss_ops.h>
#include <ATen/ops/mse_loss_backward_native.h>
#include <ATen/ops/mse_loss_backward_ops.h>
#include <ATen/ops/mse_loss_backward_native.h>
#include <ATen/ops/mse_loss_backward_ops.h>
#include <ATen/ops/multi_margin_loss_native.h>
#include <ATen/ops/multi_margin_loss_ops.h>
#include <ATen/ops/multi_margin_loss_native.h>
#include <ATen/ops/multi_margin_loss_ops.h>
#include <ATen/ops/multi_margin_loss_backward_native.h>
#include <ATen/ops/multi_margin_loss_backward_ops.h>
#include <ATen/ops/multi_margin_loss_backward_native.h>
#include <ATen/ops/multi_margin_loss_backward_ops.h>
#include <ATen/ops/multilabel_margin_loss_native.h>
#include <ATen/ops/multilabel_margin_loss_ops.h>
#include <ATen/ops/multilabel_margin_loss_native.h>
#include <ATen/ops/multilabel_margin_loss_ops.h>
#include <ATen/ops/multilabel_margin_loss_forward_native.h>
#include <ATen/ops/multilabel_margin_loss_forward_ops.h>
#include <ATen/ops/multilabel_margin_loss_forward_native.h>
#include <ATen/ops/multilabel_margin_loss_forward_ops.h>
#include <ATen/ops/multilabel_margin_loss_backward_native.h>
#include <ATen/ops/multilabel_margin_loss_backward_ops.h>
#include <ATen/ops/multilabel_margin_loss_backward_native.h>
#include <ATen/ops/multilabel_margin_loss_backward_ops.h>
#include <ATen/ops/nll_loss_native.h>
#include <ATen/ops/nll_loss_ops.h>
#include <ATen/ops/nll_loss_native.h>
#include <ATen/ops/nll_loss_ops.h>
#include <ATen/ops/nll_loss_forward_native.h>
#include <ATen/ops/nll_loss_forward_ops.h>
#include <ATen/ops/nll_loss_forward_native.h>
#include <ATen/ops/nll_loss_forward_ops.h>
#include <ATen/ops/nll_loss_backward_native.h>
#include <ATen/ops/nll_loss_backward_ops.h>
#include <ATen/ops/nll_loss_backward_native.h>
#include <ATen/ops/nll_loss_backward_ops.h>
#include <ATen/ops/nll_loss2d_native.h>
#include <ATen/ops/nll_loss2d_ops.h>
#include <ATen/ops/nll_loss2d_native.h>
#include <ATen/ops/nll_loss2d_ops.h>
#include <ATen/ops/nll_loss2d_forward_native.h>
#include <ATen/ops/nll_loss2d_forward_ops.h>
#include <ATen/ops/nll_loss2d_forward_native.h>
#include <ATen/ops/nll_loss2d_forward_ops.h>
#include <ATen/ops/nll_loss2d_backward_native.h>
#include <ATen/ops/nll_loss2d_backward_ops.h>
#include <ATen/ops/nll_loss2d_backward_native.h>
#include <ATen/ops/nll_loss2d_backward_ops.h>
#include <ATen/ops/smooth_l1_loss_native.h>
#include <ATen/ops/smooth_l1_loss_ops.h>
#include <ATen/ops/smooth_l1_loss_native.h>
#include <ATen/ops/smooth_l1_loss_ops.h>
#include <ATen/ops/smooth_l1_loss_backward_native.h>
#include <ATen/ops/smooth_l1_loss_backward_ops.h>
#include <ATen/ops/smooth_l1_loss_backward_native.h>
#include <ATen/ops/smooth_l1_loss_backward_ops.h>
#include <ATen/ops/huber_loss_native.h>
#include <ATen/ops/huber_loss_ops.h>
#include <ATen/ops/huber_loss_native.h>
#include <ATen/ops/huber_loss_ops.h>
#include <ATen/ops/huber_loss_backward_native.h>
#include <ATen/ops/huber_loss_backward_ops.h>
#include <ATen/ops/huber_loss_backward_native.h>
#include <ATen/ops/huber_loss_backward_ops.h>
#include <ATen/ops/soft_margin_loss_native.h>
#include <ATen/ops/soft_margin_loss_ops.h>
#include <ATen/ops/soft_margin_loss_native.h>
#include <ATen/ops/soft_margin_loss_ops.h>
#include <ATen/ops/soft_margin_loss_backward_native.h>
#include <ATen/ops/soft_margin_loss_backward_ops.h>
#include <ATen/ops/soft_margin_loss_backward_native.h>
#include <ATen/ops/soft_margin_loss_backward_ops.h>
#include <ATen/ops/elu_native.h>
#include <ATen/ops/elu_ops.h>
#include <ATen/ops/elu_native.h>
#include <ATen/ops/elu_ops.h>
#include <ATen/ops/elu_native.h>
#include <ATen/ops/elu_ops.h>
#include <ATen/ops/elu_backward_native.h>
#include <ATen/ops/elu_backward_ops.h>
#include <ATen/ops/elu_backward_native.h>
#include <ATen/ops/elu_backward_ops.h>
#include <ATen/ops/glu_native.h>
#include <ATen/ops/glu_ops.h>
#include <ATen/ops/glu_native.h>
#include <ATen/ops/glu_ops.h>
#include <ATen/ops/glu_backward_native.h>
#include <ATen/ops/glu_backward_ops.h>
#include <ATen/ops/glu_backward_native.h>
#include <ATen/ops/glu_backward_ops.h>
#include <ATen/ops/glu_jvp_native.h>
#include <ATen/ops/glu_jvp_ops.h>
#include <ATen/ops/glu_jvp_native.h>
#include <ATen/ops/glu_jvp_ops.h>
#include <ATen/ops/glu_backward_jvp_native.h>
#include <ATen/ops/glu_backward_jvp_ops.h>
#include <ATen/ops/glu_backward_jvp_native.h>
#include <ATen/ops/glu_backward_jvp_ops.h>
#include <ATen/ops/hardsigmoid_native.h>
#include <ATen/ops/hardsigmoid_ops.h>
#include <ATen/ops/hardsigmoid_native.h>
#include <ATen/ops/hardsigmoid_ops.h>
#include <ATen/ops/hardsigmoid_native.h>
#include <ATen/ops/hardsigmoid_ops.h>
#include <ATen/ops/hardsigmoid_backward_native.h>
#include <ATen/ops/hardsigmoid_backward_ops.h>
#include <ATen/ops/hardsigmoid_backward_native.h>
#include <ATen/ops/hardsigmoid_backward_ops.h>
#include <ATen/ops/hardtanh_native.h>
#include <ATen/ops/hardtanh_ops.h>
#include <ATen/ops/hardtanh_native.h>
#include <ATen/ops/hardtanh_ops.h>
#include <ATen/ops/hardtanh_native.h>
#include <ATen/ops/hardtanh_ops.h>
#include <ATen/ops/hardtanh_backward_native.h>
#include <ATen/ops/hardtanh_backward_ops.h>
#include <ATen/ops/hardtanh_backward_native.h>
#include <ATen/ops/hardtanh_backward_ops.h>
#include <ATen/ops/hardswish_native.h>
#include <ATen/ops/hardswish_ops.h>
#include <ATen/ops/hardswish_native.h>
#include <ATen/ops/hardswish_ops.h>
#include <ATen/ops/hardswish_native.h>
#include <ATen/ops/hardswish_ops.h>
#include <ATen/ops/hardswish_backward_native.h>
#include <ATen/ops/hardswish_backward_ops.h>
#include <ATen/ops/hardswish_backward_native.h>
#include <ATen/ops/hardswish_backward_ops.h>
#include <ATen/ops/leaky_relu_native.h>
#include <ATen/ops/leaky_relu_ops.h>
#include <ATen/ops/leaky_relu_native.h>
#include <ATen/ops/leaky_relu_ops.h>
#include <ATen/ops/leaky_relu_native.h>
#include <ATen/ops/leaky_relu_ops.h>
#include <ATen/ops/leaky_relu_backward_native.h>
#include <ATen/ops/leaky_relu_backward_ops.h>
#include <ATen/ops/leaky_relu_backward_native.h>
#include <ATen/ops/leaky_relu_backward_ops.h>
#include <ATen/ops/log_sigmoid_native.h>
#include <ATen/ops/log_sigmoid_ops.h>
#include <ATen/ops/log_sigmoid_native.h>
#include <ATen/ops/log_sigmoid_ops.h>
#include <ATen/ops/log_sigmoid_forward_native.h>
#include <ATen/ops/log_sigmoid_forward_ops.h>
#include <ATen/ops/log_sigmoid_forward_native.h>
#include <ATen/ops/log_sigmoid_forward_ops.h>
#include <ATen/ops/log_sigmoid_backward_native.h>
#include <ATen/ops/log_sigmoid_backward_ops.h>
#include <ATen/ops/log_sigmoid_backward_native.h>
#include <ATen/ops/log_sigmoid_backward_ops.h>
#include <ATen/ops/rrelu_with_noise_native.h>
#include <ATen/ops/rrelu_with_noise_ops.h>
#include <ATen/ops/rrelu_with_noise_native.h>
#include <ATen/ops/rrelu_with_noise_ops.h>
#include <ATen/ops/rrelu_with_noise_native.h>
#include <ATen/ops/rrelu_with_noise_ops.h>
#include <ATen/ops/rrelu_with_noise_native.h>
#include <ATen/ops/rrelu_with_noise_ops.h>
#include <ATen/ops/rrelu_with_noise_backward_native.h>
#include <ATen/ops/rrelu_with_noise_backward_ops.h>
#include <ATen/ops/rrelu_with_noise_backward_native.h>
#include <ATen/ops/rrelu_with_noise_backward_ops.h>
#include <ATen/ops/softplus_native.h>
#include <ATen/ops/softplus_ops.h>
#include <ATen/ops/softplus_native.h>
#include <ATen/ops/softplus_ops.h>
#include <ATen/ops/softplus_backward_native.h>
#include <ATen/ops/softplus_backward_ops.h>
#include <ATen/ops/softplus_backward_native.h>
#include <ATen/ops/softplus_backward_ops.h>
#include <ATen/ops/softshrink_native.h>
#include <ATen/ops/softshrink_ops.h>
#include <ATen/ops/softshrink_native.h>
#include <ATen/ops/softshrink_ops.h>
#include <ATen/ops/softshrink_backward_native.h>
#include <ATen/ops/softshrink_backward_ops.h>
#include <ATen/ops/softshrink_backward_native.h>
#include <ATen/ops/softshrink_backward_ops.h>
#include <ATen/ops/adaptive_avg_pool2d_native.h>
#include <ATen/ops/adaptive_avg_pool2d_ops.h>
#include <ATen/ops/adaptive_avg_pool2d_native.h>
#include <ATen/ops/adaptive_avg_pool2d_ops.h>
#include <ATen/ops/mkldnn_adaptive_avg_pool2d_native.h>
#include <ATen/ops/mkldnn_adaptive_avg_pool2d_ops.h>
#include <ATen/ops/mkldnn_adaptive_avg_pool2d_native.h>
#include <ATen/ops/mkldnn_adaptive_avg_pool2d_ops.h>
#include <ATen/ops/mkldnn_adaptive_avg_pool2d_backward_native.h>
#include <ATen/ops/mkldnn_adaptive_avg_pool2d_backward_ops.h>
#include <ATen/ops/mkldnn_adaptive_avg_pool2d_backward_native.h>
#include <ATen/ops/mkldnn_adaptive_avg_pool2d_backward_ops.h>
#include <ATen/ops/_adaptive_avg_pool2d_native.h>
#include <ATen/ops/_adaptive_avg_pool2d_ops.h>
#include <ATen/ops/_adaptive_avg_pool2d_native.h>
#include <ATen/ops/_adaptive_avg_pool2d_ops.h>
#include <ATen/ops/_adaptive_avg_pool2d_backward_native.h>
#include <ATen/ops/_adaptive_avg_pool2d_backward_ops.h>
#include <ATen/ops/_adaptive_avg_pool2d_backward_native.h>
#include <ATen/ops/_adaptive_avg_pool2d_backward_ops.h>
#include <ATen/ops/adaptive_avg_pool3d_native.h>
#include <ATen/ops/adaptive_avg_pool3d_ops.h>
#include <ATen/ops/adaptive_avg_pool3d_native.h>
#include <ATen/ops/adaptive_avg_pool3d_ops.h>
#include <ATen/ops/_adaptive_avg_pool3d_native.h>
#include <ATen/ops/_adaptive_avg_pool3d_ops.h>
#include <ATen/ops/_adaptive_avg_pool3d_native.h>
#include <ATen/ops/_adaptive_avg_pool3d_ops.h>
#include <ATen/ops/_adaptive_avg_pool3d_backward_native.h>
#include <ATen/ops/_adaptive_avg_pool3d_backward_ops.h>
#include <ATen/ops/_adaptive_avg_pool3d_backward_native.h>
#include <ATen/ops/_adaptive_avg_pool3d_backward_ops.h>
#include <ATen/ops/adaptive_max_pool2d_native.h>
#include <ATen/ops/adaptive_max_pool2d_ops.h>
#include <ATen/ops/adaptive_max_pool2d_native.h>
#include <ATen/ops/adaptive_max_pool2d_ops.h>
#include <ATen/ops/adaptive_max_pool2d_backward_native.h>
#include <ATen/ops/adaptive_max_pool2d_backward_ops.h>
#include <ATen/ops/adaptive_max_pool2d_backward_native.h>
#include <ATen/ops/adaptive_max_pool2d_backward_ops.h>
#include <ATen/ops/adaptive_max_pool3d_native.h>
#include <ATen/ops/adaptive_max_pool3d_ops.h>
#include <ATen/ops/adaptive_max_pool3d_native.h>
#include <ATen/ops/adaptive_max_pool3d_ops.h>
#include <ATen/ops/adaptive_max_pool3d_backward_native.h>
#include <ATen/ops/adaptive_max_pool3d_backward_ops.h>
#include <ATen/ops/adaptive_max_pool3d_backward_native.h>
#include <ATen/ops/adaptive_max_pool3d_backward_ops.h>
#include <ATen/ops/avg_pool2d_native.h>
#include <ATen/ops/avg_pool2d_ops.h>
#include <ATen/ops/avg_pool2d_native.h>
#include <ATen/ops/avg_pool2d_ops.h>
#include <ATen/ops/avg_pool2d_backward_native.h>
#include <ATen/ops/avg_pool2d_backward_ops.h>
#include <ATen/ops/avg_pool2d_backward_native.h>
#include <ATen/ops/avg_pool2d_backward_ops.h>
#include <ATen/ops/avg_pool3d_native.h>
#include <ATen/ops/avg_pool3d_ops.h>
#include <ATen/ops/avg_pool3d_native.h>
#include <ATen/ops/avg_pool3d_ops.h>
#include <ATen/ops/avg_pool3d_backward_native.h>
#include <ATen/ops/avg_pool3d_backward_ops.h>
#include <ATen/ops/avg_pool3d_backward_native.h>
#include <ATen/ops/avg_pool3d_backward_ops.h>
#include <ATen/ops/fractional_max_pool2d_native.h>
#include <ATen/ops/fractional_max_pool2d_ops.h>
#include <ATen/ops/fractional_max_pool2d_native.h>
#include <ATen/ops/fractional_max_pool2d_ops.h>
#include <ATen/ops/fractional_max_pool2d_backward_native.h>
#include <ATen/ops/fractional_max_pool2d_backward_ops.h>
#include <ATen/ops/fractional_max_pool2d_backward_native.h>
#include <ATen/ops/fractional_max_pool2d_backward_ops.h>
#include <ATen/ops/fractional_max_pool3d_native.h>
#include <ATen/ops/fractional_max_pool3d_ops.h>
#include <ATen/ops/fractional_max_pool3d_native.h>
#include <ATen/ops/fractional_max_pool3d_ops.h>
#include <ATen/ops/fractional_max_pool3d_backward_native.h>
#include <ATen/ops/fractional_max_pool3d_backward_ops.h>
#include <ATen/ops/fractional_max_pool3d_backward_native.h>
#include <ATen/ops/fractional_max_pool3d_backward_ops.h>
#include <ATen/ops/max_pool2d_with_indices_native.h>
#include <ATen/ops/max_pool2d_with_indices_ops.h>
#include <ATen/ops/max_pool2d_with_indices_native.h>
#include <ATen/ops/max_pool2d_with_indices_ops.h>
#include <ATen/ops/max_pool2d_with_indices_backward_native.h>
#include <ATen/ops/max_pool2d_with_indices_backward_ops.h>
#include <ATen/ops/max_pool2d_with_indices_backward_native.h>
#include <ATen/ops/max_pool2d_with_indices_backward_ops.h>
#include <ATen/ops/max_pool3d_with_indices_native.h>
#include <ATen/ops/max_pool3d_with_indices_ops.h>
#include <ATen/ops/max_pool3d_with_indices_native.h>
#include <ATen/ops/max_pool3d_with_indices_ops.h>
#include <ATen/ops/max_pool3d_with_indices_backward_native.h>
#include <ATen/ops/max_pool3d_with_indices_backward_ops.h>
#include <ATen/ops/max_pool3d_with_indices_backward_native.h>
#include <ATen/ops/max_pool3d_with_indices_backward_ops.h>
#include <ATen/ops/max_unpool2d_native.h>
#include <ATen/ops/max_unpool2d_ops.h>
#include <ATen/ops/max_unpool2d_native.h>
#include <ATen/ops/max_unpool2d_ops.h>
#include <ATen/ops/max_unpool3d_native.h>
#include <ATen/ops/max_unpool3d_ops.h>
#include <ATen/ops/max_unpool3d_native.h>
#include <ATen/ops/max_unpool3d_ops.h>
#include <ATen/ops/reflection_pad1d_native.h>
#include <ATen/ops/reflection_pad1d_ops.h>
#include <ATen/ops/reflection_pad1d_native.h>
#include <ATen/ops/reflection_pad1d_ops.h>
#include <ATen/ops/reflection_pad1d_backward_native.h>
#include <ATen/ops/reflection_pad1d_backward_ops.h>
#include <ATen/ops/reflection_pad1d_backward_native.h>
#include <ATen/ops/reflection_pad1d_backward_ops.h>
#include <ATen/ops/reflection_pad2d_native.h>
#include <ATen/ops/reflection_pad2d_ops.h>
#include <ATen/ops/reflection_pad2d_native.h>
#include <ATen/ops/reflection_pad2d_ops.h>
#include <ATen/ops/reflection_pad2d_backward_native.h>
#include <ATen/ops/reflection_pad2d_backward_ops.h>
#include <ATen/ops/reflection_pad2d_backward_native.h>
#include <ATen/ops/reflection_pad2d_backward_ops.h>
#include <ATen/ops/reflection_pad3d_native.h>
#include <ATen/ops/reflection_pad3d_ops.h>
#include <ATen/ops/reflection_pad3d_native.h>
#include <ATen/ops/reflection_pad3d_ops.h>
#include <ATen/ops/reflection_pad3d_backward_native.h>
#include <ATen/ops/reflection_pad3d_backward_ops.h>
#include <ATen/ops/reflection_pad3d_backward_native.h>
#include <ATen/ops/reflection_pad3d_backward_ops.h>
#include <ATen/ops/replication_pad1d_native.h>
#include <ATen/ops/replication_pad1d_ops.h>
#include <ATen/ops/replication_pad1d_native.h>
#include <ATen/ops/replication_pad1d_ops.h>
#include <ATen/ops/replication_pad1d_backward_native.h>
#include <ATen/ops/replication_pad1d_backward_ops.h>
#include <ATen/ops/replication_pad1d_backward_native.h>
#include <ATen/ops/replication_pad1d_backward_ops.h>
#include <ATen/ops/replication_pad2d_native.h>
#include <ATen/ops/replication_pad2d_ops.h>
#include <ATen/ops/replication_pad2d_native.h>
#include <ATen/ops/replication_pad2d_ops.h>
#include <ATen/ops/replication_pad2d_backward_native.h>
#include <ATen/ops/replication_pad2d_backward_ops.h>
#include <ATen/ops/replication_pad2d_backward_native.h>
#include <ATen/ops/replication_pad2d_backward_ops.h>
#include <ATen/ops/replication_pad3d_native.h>
#include <ATen/ops/replication_pad3d_ops.h>
#include <ATen/ops/replication_pad3d_native.h>
#include <ATen/ops/replication_pad3d_ops.h>
#include <ATen/ops/replication_pad3d_backward_native.h>
#include <ATen/ops/replication_pad3d_backward_ops.h>
#include <ATen/ops/replication_pad3d_backward_native.h>
#include <ATen/ops/replication_pad3d_backward_ops.h>
#include <ATen/ops/upsample_bilinear2d_native.h>
#include <ATen/ops/upsample_bilinear2d_ops.h>
#include <ATen/ops/upsample_bilinear2d_native.h>
#include <ATen/ops/upsample_bilinear2d_ops.h>
#include <ATen/ops/upsample_nearest2d_native.h>
#include <ATen/ops/upsample_nearest2d_ops.h>
#include <ATen/ops/upsample_nearest2d_native.h>
#include <ATen/ops/upsample_nearest2d_ops.h>
#include <ATen/ops/upsample_linear1d_native.h>
#include <ATen/ops/upsample_linear1d_ops.h>
#include <ATen/ops/upsample_linear1d_native.h>
#include <ATen/ops/upsample_linear1d_ops.h>
#include <ATen/ops/upsample_linear1d_backward_native.h>
#include <ATen/ops/upsample_linear1d_backward_ops.h>
#include <ATen/ops/upsample_linear1d_backward_native.h>
#include <ATen/ops/upsample_linear1d_backward_ops.h>
#include <ATen/ops/upsample_bilinear2d_native.h>
#include <ATen/ops/upsample_bilinear2d_ops.h>
#include <ATen/ops/upsample_bilinear2d_native.h>
#include <ATen/ops/upsample_bilinear2d_ops.h>
#include <ATen/ops/upsample_bilinear2d_backward_native.h>
#include <ATen/ops/upsample_bilinear2d_backward_ops.h>
#include <ATen/ops/upsample_bilinear2d_backward_native.h>
#include <ATen/ops/upsample_bilinear2d_backward_ops.h>
#include <ATen/ops/_upsample_bilinear2d_aa_native.h>
#include <ATen/ops/_upsample_bilinear2d_aa_ops.h>
#include <ATen/ops/_upsample_bilinear2d_aa_native.h>
#include <ATen/ops/_upsample_bilinear2d_aa_ops.h>
#include <ATen/ops/_upsample_bilinear2d_aa_backward_native.h>
#include <ATen/ops/_upsample_bilinear2d_aa_backward_ops.h>
#include <ATen/ops/_upsample_bilinear2d_aa_backward_native.h>
#include <ATen/ops/_upsample_bilinear2d_aa_backward_ops.h>
#include <ATen/ops/upsample_bicubic2d_native.h>
#include <ATen/ops/upsample_bicubic2d_ops.h>
#include <ATen/ops/upsample_bicubic2d_native.h>
#include <ATen/ops/upsample_bicubic2d_ops.h>
#include <ATen/ops/upsample_bicubic2d_backward_native.h>
#include <ATen/ops/upsample_bicubic2d_backward_ops.h>
#include <ATen/ops/upsample_bicubic2d_backward_native.h>
#include <ATen/ops/upsample_bicubic2d_backward_ops.h>
#include <ATen/ops/_upsample_bicubic2d_aa_native.h>
#include <ATen/ops/_upsample_bicubic2d_aa_ops.h>
#include <ATen/ops/_upsample_bicubic2d_aa_native.h>
#include <ATen/ops/_upsample_bicubic2d_aa_ops.h>
#include <ATen/ops/_upsample_bicubic2d_aa_backward_native.h>
#include <ATen/ops/_upsample_bicubic2d_aa_backward_ops.h>
#include <ATen/ops/_upsample_bicubic2d_aa_backward_native.h>
#include <ATen/ops/_upsample_bicubic2d_aa_backward_ops.h>
#include <ATen/ops/upsample_trilinear3d_native.h>
#include <ATen/ops/upsample_trilinear3d_ops.h>
#include <ATen/ops/upsample_trilinear3d_native.h>
#include <ATen/ops/upsample_trilinear3d_ops.h>
#include <ATen/ops/upsample_trilinear3d_backward_native.h>
#include <ATen/ops/upsample_trilinear3d_backward_ops.h>
#include <ATen/ops/upsample_trilinear3d_backward_native.h>
#include <ATen/ops/upsample_trilinear3d_backward_ops.h>
#include <ATen/ops/upsample_nearest1d_native.h>
#include <ATen/ops/upsample_nearest1d_ops.h>
#include <ATen/ops/upsample_nearest1d_native.h>
#include <ATen/ops/upsample_nearest1d_ops.h>
#include <ATen/ops/_upsample_nearest_exact1d_native.h>
#include <ATen/ops/_upsample_nearest_exact1d_ops.h>
#include <ATen/ops/_upsample_nearest_exact1d_native.h>
#include <ATen/ops/_upsample_nearest_exact1d_ops.h>
#include <ATen/ops/upsample_nearest1d_backward_native.h>
#include <ATen/ops/upsample_nearest1d_backward_ops.h>
#include <ATen/ops/upsample_nearest1d_backward_native.h>
#include <ATen/ops/upsample_nearest1d_backward_ops.h>
#include <ATen/ops/_upsample_nearest_exact1d_backward_native.h>
#include <ATen/ops/_upsample_nearest_exact1d_backward_ops.h>
#include <ATen/ops/_upsample_nearest_exact1d_backward_native.h>
#include <ATen/ops/_upsample_nearest_exact1d_backward_ops.h>
#include <ATen/ops/upsample_nearest2d_native.h>
#include <ATen/ops/upsample_nearest2d_ops.h>
#include <ATen/ops/upsample_nearest2d_native.h>
#include <ATen/ops/upsample_nearest2d_ops.h>
#include <ATen/ops/_upsample_nearest_exact2d_native.h>
#include <ATen/ops/_upsample_nearest_exact2d_ops.h>
#include <ATen/ops/_upsample_nearest_exact2d_native.h>
#include <ATen/ops/_upsample_nearest_exact2d_ops.h>
#include <ATen/ops/upsample_nearest2d_backward_native.h>
#include <ATen/ops/upsample_nearest2d_backward_ops.h>
#include <ATen/ops/upsample_nearest2d_backward_native.h>
#include <ATen/ops/upsample_nearest2d_backward_ops.h>
#include <ATen/ops/_upsample_nearest_exact2d_backward_native.h>
#include <ATen/ops/_upsample_nearest_exact2d_backward_ops.h>
#include <ATen/ops/_upsample_nearest_exact2d_backward_native.h>
#include <ATen/ops/_upsample_nearest_exact2d_backward_ops.h>
#include <ATen/ops/upsample_nearest3d_native.h>
#include <ATen/ops/upsample_nearest3d_ops.h>
#include <ATen/ops/upsample_nearest3d_native.h>
#include <ATen/ops/upsample_nearest3d_ops.h>
#include <ATen/ops/_upsample_nearest_exact3d_native.h>
#include <ATen/ops/_upsample_nearest_exact3d_ops.h>
#include <ATen/ops/_upsample_nearest_exact3d_native.h>
#include <ATen/ops/_upsample_nearest_exact3d_ops.h>
#include <ATen/ops/upsample_nearest3d_backward_native.h>
#include <ATen/ops/upsample_nearest3d_backward_ops.h>
#include <ATen/ops/upsample_nearest3d_backward_native.h>
#include <ATen/ops/upsample_nearest3d_backward_ops.h>
#include <ATen/ops/_upsample_nearest_exact3d_backward_native.h>
#include <ATen/ops/_upsample_nearest_exact3d_backward_ops.h>
#include <ATen/ops/_upsample_nearest_exact3d_backward_native.h>
#include <ATen/ops/_upsample_nearest_exact3d_backward_ops.h>
#include <ATen/ops/sigmoid_backward_native.h>
#include <ATen/ops/sigmoid_backward_ops.h>
#include <ATen/ops/sigmoid_backward_native.h>
#include <ATen/ops/sigmoid_backward_ops.h>
#include <ATen/ops/logit_backward_native.h>
#include <ATen/ops/logit_backward_ops.h>
#include <ATen/ops/logit_backward_native.h>
#include <ATen/ops/logit_backward_ops.h>
#include <ATen/ops/tanh_backward_native.h>
#include <ATen/ops/tanh_backward_ops.h>
#include <ATen/ops/tanh_backward_native.h>
#include <ATen/ops/tanh_backward_ops.h>
#include <ATen/ops/slow_conv_transpose2d_native.h>
#include <ATen/ops/slow_conv_transpose2d_ops.h>
#include <ATen/ops/slow_conv_transpose2d_native.h>
#include <ATen/ops/slow_conv_transpose2d_ops.h>
#include <ATen/ops/slow_conv_transpose3d_native.h>
#include <ATen/ops/slow_conv_transpose3d_ops.h>
#include <ATen/ops/slow_conv_transpose3d_native.h>
#include <ATen/ops/slow_conv_transpose3d_ops.h>
#include <ATen/ops/thnn_conv2d_native.h>
#include <ATen/ops/thnn_conv2d_ops.h>
#include <ATen/ops/thnn_conv2d_native.h>
#include <ATen/ops/thnn_conv2d_ops.h>
#include <ATen/ops/_slow_conv2d_forward_native.h>
#include <ATen/ops/_slow_conv2d_forward_ops.h>
#include <ATen/ops/_slow_conv2d_forward_native.h>
#include <ATen/ops/_slow_conv2d_forward_ops.h>
#include <ATen/ops/_slow_conv2d_backward_native.h>
#include <ATen/ops/_slow_conv2d_backward_ops.h>
#include <ATen/ops/_slow_conv2d_backward_native.h>
#include <ATen/ops/_slow_conv2d_backward_ops.h>
#include <ATen/ops/_conv_depthwise2d_native.h>
#include <ATen/ops/_conv_depthwise2d_ops.h>
#include <ATen/ops/_conv_depthwise2d_native.h>
#include <ATen/ops/_conv_depthwise2d_ops.h>
#include <ATen/ops/conv_depthwise3d_native.h>
#include <ATen/ops/conv_depthwise3d_ops.h>
#include <ATen/ops/conv_depthwise3d_native.h>
#include <ATen/ops/conv_depthwise3d_ops.h>
#include <ATen/ops/slow_conv3d_native.h>
#include <ATen/ops/slow_conv3d_ops.h>
#include <ATen/ops/slow_conv3d_native.h>
#include <ATen/ops/slow_conv3d_ops.h>
#include <ATen/ops/slow_conv3d_forward_native.h>
#include <ATen/ops/slow_conv3d_forward_ops.h>
#include <ATen/ops/slow_conv3d_forward_native.h>
#include <ATen/ops/slow_conv3d_forward_ops.h>
#include <ATen/ops/slow_conv_dilated2d_native.h>
#include <ATen/ops/slow_conv_dilated2d_ops.h>
#include <ATen/ops/slow_conv_dilated2d_native.h>
#include <ATen/ops/slow_conv_dilated2d_ops.h>
#include <ATen/ops/slow_conv_dilated3d_native.h>
#include <ATen/ops/slow_conv_dilated3d_ops.h>
#include <ATen/ops/slow_conv_dilated3d_native.h>
#include <ATen/ops/slow_conv_dilated3d_ops.h>
#include <ATen/ops/col2im_native.h>
#include <ATen/ops/col2im_ops.h>
#include <ATen/ops/col2im_native.h>
#include <ATen/ops/col2im_ops.h>
#include <ATen/ops/column_stack_native.h>
#include <ATen/ops/column_stack_ops.h>
#include <ATen/ops/column_stack_native.h>
#include <ATen/ops/column_stack_ops.h>
#include <ATen/ops/im2col_native.h>
#include <ATen/ops/im2col_ops.h>
#include <ATen/ops/im2col_native.h>
#include <ATen/ops/im2col_ops.h>
#include <ATen/ops/isinf_native.h>
#include <ATen/ops/isinf_ops.h>
#include <ATen/ops/isinf_native.h>
#include <ATen/ops/isinf_ops.h>
#include <ATen/ops/isposinf_native.h>
#include <ATen/ops/isposinf_ops.h>
#include <ATen/ops/isposinf_native.h>
#include <ATen/ops/isposinf_ops.h>
#include <ATen/ops/isneginf_native.h>
#include <ATen/ops/isneginf_ops.h>
#include <ATen/ops/isneginf_native.h>
#include <ATen/ops/isneginf_ops.h>
#include <ATen/ops/special_entr_native.h>
#include <ATen/ops/special_entr_ops.h>
#include <ATen/ops/special_entr_native.h>
#include <ATen/ops/special_entr_ops.h>
#include <ATen/ops/special_ndtri_native.h>
#include <ATen/ops/special_ndtri_ops.h>
#include <ATen/ops/special_ndtri_native.h>
#include <ATen/ops/special_ndtri_ops.h>
#include <ATen/ops/special_log_ndtr_native.h>
#include <ATen/ops/special_log_ndtr_ops.h>
#include <ATen/ops/special_log_ndtr_native.h>
#include <ATen/ops/special_log_ndtr_ops.h>
#include <ATen/ops/special_expm1_native.h>
#include <ATen/ops/special_expm1_ops.h>
#include <ATen/ops/special_expm1_native.h>
#include <ATen/ops/special_expm1_ops.h>
#include <ATen/ops/special_exp2_native.h>
#include <ATen/ops/special_exp2_ops.h>
#include <ATen/ops/special_exp2_native.h>
#include <ATen/ops/special_exp2_ops.h>
#include <ATen/ops/special_psi_native.h>
#include <ATen/ops/special_psi_ops.h>
#include <ATen/ops/special_psi_native.h>
#include <ATen/ops/special_psi_ops.h>
#include <ATen/ops/special_digamma_native.h>
#include <ATen/ops/special_digamma_ops.h>
#include <ATen/ops/special_digamma_native.h>
#include <ATen/ops/special_digamma_ops.h>
#include <ATen/ops/special_gammaln_native.h>
#include <ATen/ops/special_gammaln_ops.h>
#include <ATen/ops/special_gammaln_native.h>
#include <ATen/ops/special_gammaln_ops.h>
#include <ATen/ops/special_erf_native.h>
#include <ATen/ops/special_erf_ops.h>
#include <ATen/ops/special_erf_native.h>
#include <ATen/ops/special_erf_ops.h>
#include <ATen/ops/special_erfc_native.h>
#include <ATen/ops/special_erfc_ops.h>
#include <ATen/ops/special_erfc_native.h>
#include <ATen/ops/special_erfc_ops.h>
#include <ATen/ops/special_erfcx_native.h>
#include <ATen/ops/special_erfcx_ops.h>
#include <ATen/ops/special_erfcx_native.h>
#include <ATen/ops/special_erfcx_ops.h>
#include <ATen/ops/special_erfinv_native.h>
#include <ATen/ops/special_erfinv_ops.h>
#include <ATen/ops/special_erfinv_native.h>
#include <ATen/ops/special_erfinv_ops.h>
#include <ATen/ops/special_ndtr_native.h>
#include <ATen/ops/special_ndtr_ops.h>
#include <ATen/ops/special_ndtr_native.h>
#include <ATen/ops/special_ndtr_ops.h>
#include <ATen/ops/special_xlog1py_native.h>
#include <ATen/ops/special_xlog1py_ops.h>
#include <ATen/ops/special_xlog1py_native.h>
#include <ATen/ops/special_xlog1py_ops.h>
#include <ATen/ops/special_xlog1py_native.h>
#include <ATen/ops/special_xlog1py_ops.h>
#include <ATen/ops/special_xlog1py_native.h>
#include <ATen/ops/special_xlog1py_ops.h>
#include <ATen/ops/special_xlog1py_native.h>
#include <ATen/ops/special_xlog1py_ops.h>
#include <ATen/ops/special_xlog1py_native.h>
#include <ATen/ops/special_xlog1py_ops.h>
#include <ATen/ops/special_xlogy_native.h>
#include <ATen/ops/special_xlogy_ops.h>
#include <ATen/ops/special_xlogy_native.h>
#include <ATen/ops/special_xlogy_ops.h>
#include <ATen/ops/special_xlogy_native.h>
#include <ATen/ops/special_xlogy_ops.h>
#include <ATen/ops/special_xlogy_native.h>
#include <ATen/ops/special_xlogy_ops.h>
#include <ATen/ops/special_xlogy_native.h>
#include <ATen/ops/special_xlogy_ops.h>
#include <ATen/ops/special_xlogy_native.h>
#include <ATen/ops/special_xlogy_ops.h>
#include <ATen/ops/special_zeta_native.h>
#include <ATen/ops/special_zeta_ops.h>
#include <ATen/ops/special_zeta_native.h>
#include <ATen/ops/special_zeta_ops.h>
#include <ATen/ops/special_zeta_native.h>
#include <ATen/ops/special_zeta_ops.h>
#include <ATen/ops/special_zeta_native.h>
#include <ATen/ops/special_zeta_ops.h>
#include <ATen/ops/special_zeta_native.h>
#include <ATen/ops/special_zeta_ops.h>
#include <ATen/ops/special_zeta_native.h>
#include <ATen/ops/special_zeta_ops.h>
#include <ATen/ops/special_i0_native.h>
#include <ATen/ops/special_i0_ops.h>
#include <ATen/ops/special_i0_native.h>
#include <ATen/ops/special_i0_ops.h>
#include <ATen/ops/special_i0e_native.h>
#include <ATen/ops/special_i0e_ops.h>
#include <ATen/ops/special_i0e_native.h>
#include <ATen/ops/special_i0e_ops.h>
#include <ATen/ops/special_i1_native.h>
#include <ATen/ops/special_i1_ops.h>
#include <ATen/ops/special_i1_native.h>
#include <ATen/ops/special_i1_ops.h>
#include <ATen/ops/special_i1e_native.h>
#include <ATen/ops/special_i1e_ops.h>
#include <ATen/ops/special_i1e_native.h>
#include <ATen/ops/special_i1e_ops.h>
#include <ATen/ops/special_logit_native.h>
#include <ATen/ops/special_logit_ops.h>
#include <ATen/ops/special_logit_native.h>
#include <ATen/ops/special_logit_ops.h>
#include <ATen/ops/special_polygamma_native.h>
#include <ATen/ops/special_polygamma_ops.h>
#include <ATen/ops/special_polygamma_native.h>
#include <ATen/ops/special_polygamma_ops.h>
#include <ATen/ops/special_logsumexp_native.h>
#include <ATen/ops/special_logsumexp_ops.h>
#include <ATen/ops/special_logsumexp_native.h>
#include <ATen/ops/special_logsumexp_ops.h>
#include <ATen/ops/special_expit_native.h>
#include <ATen/ops/special_expit_ops.h>
#include <ATen/ops/special_expit_native.h>
#include <ATen/ops/special_expit_ops.h>
#include <ATen/ops/special_sinc_native.h>
#include <ATen/ops/special_sinc_ops.h>
#include <ATen/ops/special_sinc_native.h>
#include <ATen/ops/special_sinc_ops.h>
#include <ATen/ops/special_round_native.h>
#include <ATen/ops/special_round_ops.h>
#include <ATen/ops/special_round_native.h>
#include <ATen/ops/special_round_ops.h>
#include <ATen/ops/special_log1p_native.h>
#include <ATen/ops/special_log1p_ops.h>
#include <ATen/ops/special_log1p_native.h>
#include <ATen/ops/special_log1p_ops.h>
#include <ATen/ops/special_gammainc_native.h>
#include <ATen/ops/special_gammainc_ops.h>
#include <ATen/ops/special_gammainc_native.h>
#include <ATen/ops/special_gammainc_ops.h>
#include <ATen/ops/special_gammaincc_native.h>
#include <ATen/ops/special_gammaincc_ops.h>
#include <ATen/ops/special_gammaincc_native.h>
#include <ATen/ops/special_gammaincc_ops.h>
#include <ATen/ops/special_multigammaln_native.h>
#include <ATen/ops/special_multigammaln_ops.h>
#include <ATen/ops/special_multigammaln_native.h>
#include <ATen/ops/special_multigammaln_ops.h>
#include <ATen/ops/fft_fft_native.h>
#include <ATen/ops/fft_fft_ops.h>
#include <ATen/ops/fft_fft_native.h>
#include <ATen/ops/fft_fft_ops.h>
#include <ATen/ops/fft_ifft_native.h>
#include <ATen/ops/fft_ifft_ops.h>
#include <ATen/ops/fft_ifft_native.h>
#include <ATen/ops/fft_ifft_ops.h>
#include <ATen/ops/fft_rfft_native.h>
#include <ATen/ops/fft_rfft_ops.h>
#include <ATen/ops/fft_rfft_native.h>
#include <ATen/ops/fft_rfft_ops.h>
#include <ATen/ops/fft_irfft_native.h>
#include <ATen/ops/fft_irfft_ops.h>
#include <ATen/ops/fft_irfft_native.h>
#include <ATen/ops/fft_irfft_ops.h>
#include <ATen/ops/fft_hfft_native.h>
#include <ATen/ops/fft_hfft_ops.h>
#include <ATen/ops/fft_hfft_native.h>
#include <ATen/ops/fft_hfft_ops.h>
#include <ATen/ops/fft_ihfft_native.h>
#include <ATen/ops/fft_ihfft_ops.h>
#include <ATen/ops/fft_ihfft_native.h>
#include <ATen/ops/fft_ihfft_ops.h>
#include <ATen/ops/fft_fft2_native.h>
#include <ATen/ops/fft_fft2_ops.h>
#include <ATen/ops/fft_fft2_native.h>
#include <ATen/ops/fft_fft2_ops.h>
#include <ATen/ops/fft_ifft2_native.h>
#include <ATen/ops/fft_ifft2_ops.h>
#include <ATen/ops/fft_ifft2_native.h>
#include <ATen/ops/fft_ifft2_ops.h>
#include <ATen/ops/fft_rfft2_native.h>
#include <ATen/ops/fft_rfft2_ops.h>
#include <ATen/ops/fft_rfft2_native.h>
#include <ATen/ops/fft_rfft2_ops.h>
#include <ATen/ops/fft_irfft2_native.h>
#include <ATen/ops/fft_irfft2_ops.h>
#include <ATen/ops/fft_irfft2_native.h>
#include <ATen/ops/fft_irfft2_ops.h>
#include <ATen/ops/fft_hfft2_native.h>
#include <ATen/ops/fft_hfft2_ops.h>
#include <ATen/ops/fft_hfft2_native.h>
#include <ATen/ops/fft_hfft2_ops.h>
#include <ATen/ops/fft_ihfft2_native.h>
#include <ATen/ops/fft_ihfft2_ops.h>
#include <ATen/ops/fft_ihfft2_native.h>
#include <ATen/ops/fft_ihfft2_ops.h>
#include <ATen/ops/fft_fftn_native.h>
#include <ATen/ops/fft_fftn_ops.h>
#include <ATen/ops/fft_fftn_native.h>
#include <ATen/ops/fft_fftn_ops.h>
#include <ATen/ops/fft_ifftn_native.h>
#include <ATen/ops/fft_ifftn_ops.h>
#include <ATen/ops/fft_ifftn_native.h>
#include <ATen/ops/fft_ifftn_ops.h>
#include <ATen/ops/fft_rfftn_native.h>
#include <ATen/ops/fft_rfftn_ops.h>
#include <ATen/ops/fft_rfftn_native.h>
#include <ATen/ops/fft_rfftn_ops.h>
#include <ATen/ops/fft_irfftn_native.h>
#include <ATen/ops/fft_irfftn_ops.h>
#include <ATen/ops/fft_irfftn_native.h>
#include <ATen/ops/fft_irfftn_ops.h>
#include <ATen/ops/fft_hfftn_native.h>
#include <ATen/ops/fft_hfftn_ops.h>
#include <ATen/ops/fft_hfftn_native.h>
#include <ATen/ops/fft_hfftn_ops.h>
#include <ATen/ops/fft_ihfftn_native.h>
#include <ATen/ops/fft_ihfftn_ops.h>
#include <ATen/ops/fft_ihfftn_native.h>
#include <ATen/ops/fft_ihfftn_ops.h>
#include <ATen/ops/fft_fftfreq_native.h>
#include <ATen/ops/fft_fftfreq_ops.h>
#include <ATen/ops/fft_fftfreq_native.h>
#include <ATen/ops/fft_fftfreq_ops.h>
#include <ATen/ops/fft_rfftfreq_native.h>
#include <ATen/ops/fft_rfftfreq_ops.h>
#include <ATen/ops/fft_rfftfreq_native.h>
#include <ATen/ops/fft_rfftfreq_ops.h>
#include <ATen/ops/linalg_cholesky_ex_native.h>
#include <ATen/ops/linalg_cholesky_ex_ops.h>
#include <ATen/ops/linalg_cholesky_ex_native.h>
#include <ATen/ops/linalg_cholesky_ex_ops.h>
#include <ATen/ops/linalg_cholesky_native.h>
#include <ATen/ops/linalg_cholesky_ops.h>
#include <ATen/ops/linalg_cholesky_native.h>
#include <ATen/ops/linalg_cholesky_ops.h>
#include <ATen/ops/linalg_cross_native.h>
#include <ATen/ops/linalg_cross_ops.h>
#include <ATen/ops/linalg_cross_native.h>
#include <ATen/ops/linalg_cross_ops.h>
#include <ATen/ops/linalg_lu_factor_native.h>
#include <ATen/ops/linalg_lu_factor_ops.h>
#include <ATen/ops/linalg_lu_factor_native.h>
#include <ATen/ops/linalg_lu_factor_ops.h>
#include <ATen/ops/linalg_lu_factor_ex_native.h>
#include <ATen/ops/linalg_lu_factor_ex_ops.h>
#include <ATen/ops/linalg_lu_factor_ex_native.h>
#include <ATen/ops/linalg_lu_factor_ex_ops.h>
#include <ATen/ops/linalg_lu_native.h>
#include <ATen/ops/linalg_lu_ops.h>
#include <ATen/ops/linalg_lu_native.h>
#include <ATen/ops/linalg_lu_ops.h>
#include <ATen/ops/linalg_lu_solve_native.h>
#include <ATen/ops/linalg_lu_solve_ops.h>
#include <ATen/ops/linalg_lu_solve_native.h>
#include <ATen/ops/linalg_lu_solve_ops.h>
#include <ATen/ops/_linalg_det_native.h>
#include <ATen/ops/_linalg_det_ops.h>
#include <ATen/ops/_linalg_det_native.h>
#include <ATen/ops/_linalg_det_ops.h>
#include <ATen/ops/linalg_det_native.h>
#include <ATen/ops/linalg_det_ops.h>
#include <ATen/ops/linalg_det_native.h>
#include <ATen/ops/linalg_det_ops.h>
#include <ATen/ops/linalg_ldl_factor_ex_native.h>
#include <ATen/ops/linalg_ldl_factor_ex_ops.h>
#include <ATen/ops/linalg_ldl_factor_ex_native.h>
#include <ATen/ops/linalg_ldl_factor_ex_ops.h>
#include <ATen/ops/linalg_ldl_factor_native.h>
#include <ATen/ops/linalg_ldl_factor_ops.h>
#include <ATen/ops/linalg_ldl_factor_native.h>
#include <ATen/ops/linalg_ldl_factor_ops.h>
#include <ATen/ops/linalg_ldl_solve_native.h>
#include <ATen/ops/linalg_ldl_solve_ops.h>
#include <ATen/ops/linalg_ldl_solve_native.h>
#include <ATen/ops/linalg_ldl_solve_ops.h>
#include <ATen/ops/linalg_lstsq_native.h>
#include <ATen/ops/linalg_lstsq_ops.h>
#include <ATen/ops/linalg_lstsq_native.h>
#include <ATen/ops/linalg_lstsq_ops.h>
#include <ATen/ops/linalg_matmul_native.h>
#include <ATen/ops/linalg_matmul_ops.h>
#include <ATen/ops/linalg_matmul_native.h>
#include <ATen/ops/linalg_matmul_ops.h>
#include <ATen/ops/linalg_vecdot_native.h>
#include <ATen/ops/linalg_vecdot_ops.h>
#include <ATen/ops/linalg_vecdot_native.h>
#include <ATen/ops/linalg_vecdot_ops.h>
#include <ATen/ops/linalg_matrix_exp_native.h>
#include <ATen/ops/linalg_matrix_exp_ops.h>
#include <ATen/ops/linalg_matrix_exp_native.h>
#include <ATen/ops/linalg_matrix_exp_ops.h>
#include <ATen/ops/_linalg_slogdet_native.h>
#include <ATen/ops/_linalg_slogdet_ops.h>
#include <ATen/ops/_linalg_slogdet_native.h>
#include <ATen/ops/_linalg_slogdet_ops.h>
#include <ATen/ops/linalg_slogdet_native.h>
#include <ATen/ops/linalg_slogdet_ops.h>
#include <ATen/ops/linalg_slogdet_native.h>
#include <ATen/ops/linalg_slogdet_ops.h>
#include <ATen/ops/slogdet_native.h>
#include <ATen/ops/slogdet_ops.h>
#include <ATen/ops/slogdet_native.h>
#include <ATen/ops/slogdet_ops.h>
#include <ATen/ops/linalg_eig_native.h>
#include <ATen/ops/linalg_eig_ops.h>
#include <ATen/ops/linalg_eig_native.h>
#include <ATen/ops/linalg_eig_ops.h>
#include <ATen/ops/linalg_eigvals_native.h>
#include <ATen/ops/linalg_eigvals_ops.h>
#include <ATen/ops/linalg_eigvals_native.h>
#include <ATen/ops/linalg_eigvals_ops.h>
#include <ATen/ops/_linalg_eigh_native.h>
#include <ATen/ops/_linalg_eigh_ops.h>
#include <ATen/ops/_linalg_eigh_native.h>
#include <ATen/ops/_linalg_eigh_ops.h>
#include <ATen/ops/linalg_eigh_native.h>
#include <ATen/ops/linalg_eigh_ops.h>
#include <ATen/ops/linalg_eigh_native.h>
#include <ATen/ops/linalg_eigh_ops.h>
#include <ATen/ops/linalg_eigvalsh_native.h>
#include <ATen/ops/linalg_eigvalsh_ops.h>
#include <ATen/ops/linalg_eigvalsh_native.h>
#include <ATen/ops/linalg_eigvalsh_ops.h>
#include <ATen/ops/linalg_householder_product_native.h>
#include <ATen/ops/linalg_householder_product_ops.h>
#include <ATen/ops/linalg_householder_product_native.h>
#include <ATen/ops/linalg_householder_product_ops.h>
#include <ATen/ops/linalg_inv_ex_native.h>
#include <ATen/ops/linalg_inv_ex_ops.h>
#include <ATen/ops/linalg_inv_ex_native.h>
#include <ATen/ops/linalg_inv_ex_ops.h>
#include <ATen/ops/linalg_inv_native.h>
#include <ATen/ops/linalg_inv_ops.h>
#include <ATen/ops/linalg_inv_native.h>
#include <ATen/ops/linalg_inv_ops.h>
#include <ATen/ops/inverse_native.h>
#include <ATen/ops/inverse_ops.h>
#include <ATen/ops/inverse_native.h>
#include <ATen/ops/inverse_ops.h>
#include <ATen/ops/inner_native.h>
#include <ATen/ops/inner_ops.h>
#include <ATen/ops/inner_native.h>
#include <ATen/ops/inner_ops.h>
#include <ATen/ops/outer_native.h>
#include <ATen/ops/outer_ops.h>
#include <ATen/ops/outer_native.h>
#include <ATen/ops/outer_ops.h>
#include <ATen/ops/ger_native.h>
#include <ATen/ops/ger_ops.h>
#include <ATen/ops/ger_native.h>
#include <ATen/ops/ger_ops.h>
#include <ATen/ops/linalg_norm_native.h>
#include <ATen/ops/linalg_norm_ops.h>
#include <ATen/ops/linalg_norm_native.h>
#include <ATen/ops/linalg_norm_ops.h>
#include <ATen/ops/linalg_norm_native.h>
#include <ATen/ops/linalg_norm_ops.h>
#include <ATen/ops/linalg_norm_native.h>
#include <ATen/ops/linalg_norm_ops.h>
#include <ATen/ops/linalg_vector_norm_native.h>
#include <ATen/ops/linalg_vector_norm_ops.h>
#include <ATen/ops/linalg_vector_norm_native.h>
#include <ATen/ops/linalg_vector_norm_ops.h>
#include <ATen/ops/linalg_matrix_norm_native.h>
#include <ATen/ops/linalg_matrix_norm_ops.h>
#include <ATen/ops/linalg_matrix_norm_native.h>
#include <ATen/ops/linalg_matrix_norm_ops.h>
#include <ATen/ops/linalg_matrix_norm_native.h>
#include <ATen/ops/linalg_matrix_norm_ops.h>
#include <ATen/ops/linalg_matrix_norm_native.h>
#include <ATen/ops/linalg_matrix_norm_ops.h>
#include <ATen/ops/_linalg_svd_native.h>
#include <ATen/ops/_linalg_svd_ops.h>
#include <ATen/ops/_linalg_svd_native.h>
#include <ATen/ops/_linalg_svd_ops.h>
#include <ATen/ops/linalg_svd_native.h>
#include <ATen/ops/linalg_svd_ops.h>
#include <ATen/ops/linalg_svd_native.h>
#include <ATen/ops/linalg_svd_ops.h>
#include <ATen/ops/linalg_svdvals_native.h>
#include <ATen/ops/linalg_svdvals_ops.h>
#include <ATen/ops/linalg_svdvals_native.h>
#include <ATen/ops/linalg_svdvals_ops.h>
#include <ATen/ops/linalg_cond_native.h>
#include <ATen/ops/linalg_cond_ops.h>
#include <ATen/ops/linalg_cond_native.h>
#include <ATen/ops/linalg_cond_ops.h>
#include <ATen/ops/linalg_cond_native.h>
#include <ATen/ops/linalg_cond_ops.h>
#include <ATen/ops/linalg_cond_native.h>
#include <ATen/ops/linalg_cond_ops.h>
#include <ATen/ops/linalg_pinv_native.h>
#include <ATen/ops/linalg_pinv_ops.h>
#include <ATen/ops/linalg_pinv_native.h>
#include <ATen/ops/linalg_pinv_ops.h>
#include <ATen/ops/linalg_pinv_native.h>
#include <ATen/ops/linalg_pinv_ops.h>
#include <ATen/ops/linalg_pinv_native.h>
#include <ATen/ops/linalg_pinv_ops.h>
#include <ATen/ops/linalg_pinv_native.h>
#include <ATen/ops/linalg_pinv_ops.h>
#include <ATen/ops/linalg_pinv_native.h>
#include <ATen/ops/linalg_pinv_ops.h>
#include <ATen/ops/linalg_pinv_native.h>
#include <ATen/ops/linalg_pinv_ops.h>
#include <ATen/ops/linalg_pinv_native.h>
#include <ATen/ops/linalg_pinv_ops.h>
#include <ATen/ops/_linalg_solve_ex_native.h>
#include <ATen/ops/_linalg_solve_ex_ops.h>
#include <ATen/ops/_linalg_solve_ex_native.h>
#include <ATen/ops/_linalg_solve_ex_ops.h>
#include <ATen/ops/linalg_solve_ex_native.h>
#include <ATen/ops/linalg_solve_ex_ops.h>
#include <ATen/ops/linalg_solve_ex_native.h>
#include <ATen/ops/linalg_solve_ex_ops.h>
#include <ATen/ops/linalg_solve_native.h>
#include <ATen/ops/linalg_solve_ops.h>
#include <ATen/ops/linalg_solve_native.h>
#include <ATen/ops/linalg_solve_ops.h>
#include <ATen/ops/linalg_tensorinv_native.h>
#include <ATen/ops/linalg_tensorinv_ops.h>
#include <ATen/ops/linalg_tensorinv_native.h>
#include <ATen/ops/linalg_tensorinv_ops.h>
#include <ATen/ops/linalg_tensorsolve_native.h>
#include <ATen/ops/linalg_tensorsolve_ops.h>
#include <ATen/ops/linalg_tensorsolve_native.h>
#include <ATen/ops/linalg_tensorsolve_ops.h>
#include <ATen/ops/linalg_qr_native.h>
#include <ATen/ops/linalg_qr_ops.h>
#include <ATen/ops/linalg_qr_native.h>
#include <ATen/ops/linalg_qr_ops.h>
#include <ATen/ops/linalg_matrix_power_native.h>
#include <ATen/ops/linalg_matrix_power_ops.h>
#include <ATen/ops/linalg_matrix_power_native.h>
#include <ATen/ops/linalg_matrix_power_ops.h>
#include <ATen/ops/linalg_matrix_rank_native.h>
#include <ATen/ops/linalg_matrix_rank_ops.h>
#include <ATen/ops/linalg_matrix_rank_native.h>
#include <ATen/ops/linalg_matrix_rank_ops.h>
#include <ATen/ops/linalg_matrix_rank_native.h>
#include <ATen/ops/linalg_matrix_rank_ops.h>
#include <ATen/ops/linalg_matrix_rank_native.h>
#include <ATen/ops/linalg_matrix_rank_ops.h>
#include <ATen/ops/linalg_matrix_rank_native.h>
#include <ATen/ops/linalg_matrix_rank_ops.h>
#include <ATen/ops/linalg_matrix_rank_native.h>
#include <ATen/ops/linalg_matrix_rank_ops.h>
#include <ATen/ops/linalg_matrix_rank_native.h>
#include <ATen/ops/linalg_matrix_rank_ops.h>
#include <ATen/ops/linalg_matrix_rank_native.h>
#include <ATen/ops/linalg_matrix_rank_ops.h>
#include <ATen/ops/linalg_multi_dot_native.h>
#include <ATen/ops/linalg_multi_dot_ops.h>
#include <ATen/ops/linalg_multi_dot_native.h>
#include <ATen/ops/linalg_multi_dot_ops.h>
#include <ATen/ops/_test_optional_intlist_native.h>
#include <ATen/ops/_test_optional_intlist_ops.h>
#include <ATen/ops/_test_optional_intlist_native.h>
#include <ATen/ops/_test_optional_intlist_ops.h>
#include <ATen/ops/_test_optional_filled_intlist_native.h>
#include <ATen/ops/_test_optional_filled_intlist_ops.h>
#include <ATen/ops/_test_optional_filled_intlist_native.h>
#include <ATen/ops/_test_optional_filled_intlist_ops.h>
#include <ATen/ops/_test_optional_floatlist_native.h>
#include <ATen/ops/_test_optional_floatlist_ops.h>
#include <ATen/ops/_test_optional_floatlist_native.h>
#include <ATen/ops/_test_optional_floatlist_ops.h>
#include <ATen/ops/_test_warn_in_autograd_native.h>
#include <ATen/ops/_test_warn_in_autograd_ops.h>
#include <ATen/ops/_test_warn_in_autograd_native.h>
#include <ATen/ops/_test_warn_in_autograd_ops.h>
#include <ATen/ops/_test_autograd_multiple_dispatch_native.h>
#include <ATen/ops/_test_autograd_multiple_dispatch_ops.h>
#include <ATen/ops/_test_autograd_multiple_dispatch_native.h>
#include <ATen/ops/_test_autograd_multiple_dispatch_ops.h>
#include <ATen/ops/_test_autograd_multiple_dispatch_view_copy_native.h>
#include <ATen/ops/_test_autograd_multiple_dispatch_view_copy_ops.h>
#include <ATen/ops/_test_autograd_multiple_dispatch_view_copy_native.h>
#include <ATen/ops/_test_autograd_multiple_dispatch_view_copy_ops.h>
#include <ATen/ops/segment_reduce_native.h>
#include <ATen/ops/segment_reduce_ops.h>
#include <ATen/ops/segment_reduce_native.h>
#include <ATen/ops/segment_reduce_ops.h>
#include <ATen/ops/_segment_reduce_backward_native.h>
#include <ATen/ops/_segment_reduce_backward_ops.h>
#include <ATen/ops/_segment_reduce_backward_native.h>
#include <ATen/ops/_segment_reduce_backward_ops.h>
#include <ATen/ops/_nested_tensor_from_tensor_list_native.h>
#include <ATen/ops/_nested_tensor_from_tensor_list_ops.h>
#include <ATen/ops/_nested_tensor_from_tensor_list_native.h>
#include <ATen/ops/_nested_tensor_from_tensor_list_ops.h>
#include <ATen/ops/_fw_primal_copy_native.h>
#include <ATen/ops/_fw_primal_copy_ops.h>
#include <ATen/ops/_fw_primal_copy_native.h>
#include <ATen/ops/_fw_primal_copy_ops.h>
#include <ATen/ops/_make_dual_copy_native.h>
#include <ATen/ops/_make_dual_copy_ops.h>
#include <ATen/ops/_make_dual_copy_native.h>
#include <ATen/ops/_make_dual_copy_ops.h>
#include <ATen/ops/view_as_real_copy_native.h>
#include <ATen/ops/view_as_real_copy_ops.h>
#include <ATen/ops/view_as_real_copy_native.h>
#include <ATen/ops/view_as_real_copy_ops.h>
#include <ATen/ops/view_as_complex_copy_native.h>
#include <ATen/ops/view_as_complex_copy_ops.h>
#include <ATen/ops/view_as_complex_copy_native.h>
#include <ATen/ops/view_as_complex_copy_ops.h>
#include <ATen/ops/_conj_copy_native.h>
#include <ATen/ops/_conj_copy_ops.h>
#include <ATen/ops/_conj_copy_native.h>
#include <ATen/ops/_conj_copy_ops.h>
#include <ATen/ops/_neg_view_copy_native.h>
#include <ATen/ops/_neg_view_copy_ops.h>
#include <ATen/ops/_neg_view_copy_native.h>
#include <ATen/ops/_neg_view_copy_ops.h>
#include <ATen/ops/as_strided_copy_native.h>
#include <ATen/ops/as_strided_copy_ops.h>
#include <ATen/ops/as_strided_copy_native.h>
#include <ATen/ops/as_strided_copy_ops.h>
#include <ATen/ops/_sparse_broadcast_to_copy_native.h>
#include <ATen/ops/_sparse_broadcast_to_copy_ops.h>
#include <ATen/ops/_sparse_broadcast_to_copy_native.h>
#include <ATen/ops/_sparse_broadcast_to_copy_ops.h>
#include <ATen/ops/diagonal_copy_native.h>
#include <ATen/ops/diagonal_copy_ops.h>
#include <ATen/ops/diagonal_copy_native.h>
#include <ATen/ops/diagonal_copy_ops.h>
#include <ATen/ops/expand_copy_native.h>
#include <ATen/ops/expand_copy_ops.h>
#include <ATen/ops/expand_copy_native.h>
#include <ATen/ops/expand_copy_ops.h>
#include <ATen/ops/permute_copy_native.h>
#include <ATen/ops/permute_copy_ops.h>
#include <ATen/ops/permute_copy_native.h>
#include <ATen/ops/permute_copy_ops.h>
#include <ATen/ops/_reshape_alias_copy_native.h>
#include <ATen/ops/_reshape_alias_copy_ops.h>
#include <ATen/ops/_reshape_alias_copy_native.h>
#include <ATen/ops/_reshape_alias_copy_ops.h>
#include <ATen/ops/select_copy_native.h>
#include <ATen/ops/select_copy_ops.h>
#include <ATen/ops/select_copy_native.h>
#include <ATen/ops/select_copy_ops.h>
#include <ATen/ops/detach_copy_native.h>
#include <ATen/ops/detach_copy_ops.h>
#include <ATen/ops/detach_copy_native.h>
#include <ATen/ops/detach_copy_ops.h>
#include <ATen/ops/slice_copy_native.h>
#include <ATen/ops/slice_copy_ops.h>
#include <ATen/ops/slice_copy_native.h>
#include <ATen/ops/slice_copy_ops.h>
#include <ATen/ops/split_copy_native.h>
#include <ATen/ops/split_copy_ops.h>
#include <ATen/ops/split_copy_native.h>
#include <ATen/ops/split_copy_ops.h>
#include <ATen/ops/split_with_sizes_copy_native.h>
#include <ATen/ops/split_with_sizes_copy_ops.h>
#include <ATen/ops/split_with_sizes_copy_native.h>
#include <ATen/ops/split_with_sizes_copy_ops.h>
#include <ATen/ops/squeeze_copy_native.h>
#include <ATen/ops/squeeze_copy_ops.h>
#include <ATen/ops/squeeze_copy_native.h>
#include <ATen/ops/squeeze_copy_ops.h>
#include <ATen/ops/squeeze_copy_native.h>
#include <ATen/ops/squeeze_copy_ops.h>
#include <ATen/ops/squeeze_copy_native.h>
#include <ATen/ops/squeeze_copy_ops.h>
#include <ATen/ops/squeeze_copy_native.h>
#include <ATen/ops/squeeze_copy_ops.h>
#include <ATen/ops/squeeze_copy_native.h>
#include <ATen/ops/squeeze_copy_ops.h>
#include <ATen/ops/t_copy_native.h>
#include <ATen/ops/t_copy_ops.h>
#include <ATen/ops/t_copy_native.h>
#include <ATen/ops/t_copy_ops.h>
#include <ATen/ops/transpose_copy_native.h>
#include <ATen/ops/transpose_copy_ops.h>
#include <ATen/ops/transpose_copy_native.h>
#include <ATen/ops/transpose_copy_ops.h>
#include <ATen/ops/unsqueeze_copy_native.h>
#include <ATen/ops/unsqueeze_copy_ops.h>
#include <ATen/ops/unsqueeze_copy_native.h>
#include <ATen/ops/unsqueeze_copy_ops.h>
#include <ATen/ops/_indices_copy_native.h>
#include <ATen/ops/_indices_copy_ops.h>
#include <ATen/ops/_indices_copy_native.h>
#include <ATen/ops/_indices_copy_ops.h>
#include <ATen/ops/_values_copy_native.h>
#include <ATen/ops/_values_copy_ops.h>
#include <ATen/ops/_values_copy_native.h>
#include <ATen/ops/_values_copy_ops.h>
#include <ATen/ops/indices_copy_native.h>
#include <ATen/ops/indices_copy_ops.h>
#include <ATen/ops/indices_copy_native.h>
#include <ATen/ops/indices_copy_ops.h>
#include <ATen/ops/values_copy_native.h>
#include <ATen/ops/values_copy_ops.h>
#include <ATen/ops/values_copy_native.h>
#include <ATen/ops/values_copy_ops.h>
#include <ATen/ops/crow_indices_copy_native.h>
#include <ATen/ops/crow_indices_copy_ops.h>
#include <ATen/ops/crow_indices_copy_native.h>
#include <ATen/ops/crow_indices_copy_ops.h>
#include <ATen/ops/col_indices_copy_native.h>
#include <ATen/ops/col_indices_copy_ops.h>
#include <ATen/ops/col_indices_copy_native.h>
#include <ATen/ops/col_indices_copy_ops.h>
#include <ATen/ops/ccol_indices_copy_native.h>
#include <ATen/ops/ccol_indices_copy_ops.h>
#include <ATen/ops/ccol_indices_copy_native.h>
#include <ATen/ops/ccol_indices_copy_ops.h>
#include <ATen/ops/row_indices_copy_native.h>
#include <ATen/ops/row_indices_copy_ops.h>
#include <ATen/ops/row_indices_copy_native.h>
#include <ATen/ops/row_indices_copy_ops.h>
#include <ATen/ops/unbind_copy_native.h>
#include <ATen/ops/unbind_copy_ops.h>
#include <ATen/ops/unbind_copy_native.h>
#include <ATen/ops/unbind_copy_ops.h>
#include <ATen/ops/view_copy_native.h>
#include <ATen/ops/view_copy_ops.h>
#include <ATen/ops/view_copy_native.h>
#include <ATen/ops/view_copy_ops.h>
#include <ATen/ops/view_copy_native.h>
#include <ATen/ops/view_copy_ops.h>
#include <ATen/ops/view_copy_native.h>
#include <ATen/ops/view_copy_ops.h>
#include <ATen/ops/unfold_copy_native.h>
#include <ATen/ops/unfold_copy_ops.h>
#include <ATen/ops/unfold_copy_native.h>
#include <ATen/ops/unfold_copy_ops.h>
#include <ATen/ops/alias_copy_native.h>
#include <ATen/ops/alias_copy_ops.h>
#include <ATen/ops/alias_copy_native.h>
#include <ATen/ops/alias_copy_ops.h>
#include <ATen/ops/to_padded_tensor_native.h>
#include <ATen/ops/to_padded_tensor_ops.h>
#include <ATen/ops/to_padded_tensor_native.h>
#include <ATen/ops/to_padded_tensor_ops.h>
#include <ATen/ops/_transformer_encoder_layer_fwd_native.h>
#include <ATen/ops/_transformer_encoder_layer_fwd_ops.h>
#include <ATen/ops/_transformer_encoder_layer_fwd_native.h>
#include <ATen/ops/_transformer_encoder_layer_fwd_ops.h>
#include <ATen/ops/_native_multi_head_attention_native.h>
#include <ATen/ops/_native_multi_head_attention_ops.h>
#include <ATen/ops/_native_multi_head_attention_native.h>
#include <ATen/ops/_native_multi_head_attention_ops.h>
#include <ATen/ops/_triton_scaled_dot_attention_native.h>
#include <ATen/ops/_triton_scaled_dot_attention_ops.h>
#include <ATen/ops/_triton_scaled_dot_attention_native.h>
#include <ATen/ops/_triton_scaled_dot_attention_ops.h>
#include <ATen/ops/_triton_multi_head_attention_native.h>
#include <ATen/ops/_triton_multi_head_attention_ops.h>
#include <ATen/ops/_triton_multi_head_attention_native.h>
#include <ATen/ops/_triton_multi_head_attention_ops.h>
#include <ATen/ops/special_airy_ai_native.h>
#include <ATen/ops/special_airy_ai_ops.h>
#include <ATen/ops/special_airy_ai_native.h>
#include <ATen/ops/special_airy_ai_ops.h>
#include <ATen/ops/special_bessel_j0_native.h>
#include <ATen/ops/special_bessel_j0_ops.h>
#include <ATen/ops/special_bessel_j0_native.h>
#include <ATen/ops/special_bessel_j0_ops.h>
#include <ATen/ops/special_bessel_j1_native.h>
#include <ATen/ops/special_bessel_j1_ops.h>
#include <ATen/ops/special_bessel_j1_native.h>
#include <ATen/ops/special_bessel_j1_ops.h>
#include <ATen/ops/special_bessel_y0_native.h>
#include <ATen/ops/special_bessel_y0_ops.h>
#include <ATen/ops/special_bessel_y0_native.h>
#include <ATen/ops/special_bessel_y0_ops.h>
#include <ATen/ops/special_bessel_y1_native.h>
#include <ATen/ops/special_bessel_y1_ops.h>
#include <ATen/ops/special_bessel_y1_native.h>
#include <ATen/ops/special_bessel_y1_ops.h>
#include <ATen/ops/special_chebyshev_polynomial_t_native.h>
#include <ATen/ops/special_chebyshev_polynomial_t_ops.h>
#include <ATen/ops/special_chebyshev_polynomial_t_native.h>
#include <ATen/ops/special_chebyshev_polynomial_t_ops.h>
#include <ATen/ops/special_chebyshev_polynomial_t_native.h>
#include <ATen/ops/special_chebyshev_polynomial_t_ops.h>
#include <ATen/ops/special_chebyshev_polynomial_t_native.h>
#include <ATen/ops/special_chebyshev_polynomial_t_ops.h>
#include <ATen/ops/special_chebyshev_polynomial_t_native.h>
#include <ATen/ops/special_chebyshev_polynomial_t_ops.h>
#include <ATen/ops/special_chebyshev_polynomial_t_native.h>
#include <ATen/ops/special_chebyshev_polynomial_t_ops.h>
#include <ATen/ops/special_chebyshev_polynomial_u_native.h>
#include <ATen/ops/special_chebyshev_polynomial_u_ops.h>
#include <ATen/ops/special_chebyshev_polynomial_u_native.h>
#include <ATen/ops/special_chebyshev_polynomial_u_ops.h>
#include <ATen/ops/special_chebyshev_polynomial_u_native.h>
#include <ATen/ops/special_chebyshev_polynomial_u_ops.h>
#include <ATen/ops/special_chebyshev_polynomial_u_native.h>
#include <ATen/ops/special_chebyshev_polynomial_u_ops.h>
#include <ATen/ops/special_chebyshev_polynomial_u_native.h>
#include <ATen/ops/special_chebyshev_polynomial_u_ops.h>
#include <ATen/ops/special_chebyshev_polynomial_u_native.h>
#include <ATen/ops/special_chebyshev_polynomial_u_ops.h>
#include <ATen/ops/special_chebyshev_polynomial_v_native.h>
#include <ATen/ops/special_chebyshev_polynomial_v_ops.h>
#include <ATen/ops/special_chebyshev_polynomial_v_native.h>
#include <ATen/ops/special_chebyshev_polynomial_v_ops.h>
#include <ATen/ops/special_chebyshev_polynomial_v_native.h>
#include <ATen/ops/special_chebyshev_polynomial_v_ops.h>
#include <ATen/ops/special_chebyshev_polynomial_v_native.h>
#include <ATen/ops/special_chebyshev_polynomial_v_ops.h>
#include <ATen/ops/special_chebyshev_polynomial_v_native.h>
#include <ATen/ops/special_chebyshev_polynomial_v_ops.h>
#include <ATen/ops/special_chebyshev_polynomial_v_native.h>
#include <ATen/ops/special_chebyshev_polynomial_v_ops.h>
#include <ATen/ops/special_chebyshev_polynomial_w_native.h>
#include <ATen/ops/special_chebyshev_polynomial_w_ops.h>
#include <ATen/ops/special_chebyshev_polynomial_w_native.h>
#include <ATen/ops/special_chebyshev_polynomial_w_ops.h>
#include <ATen/ops/special_chebyshev_polynomial_w_native.h>
#include <ATen/ops/special_chebyshev_polynomial_w_ops.h>
#include <ATen/ops/special_chebyshev_polynomial_w_native.h>
#include <ATen/ops/special_chebyshev_polynomial_w_ops.h>
#include <ATen/ops/special_chebyshev_polynomial_w_native.h>
#include <ATen/ops/special_chebyshev_polynomial_w_ops.h>
#include <ATen/ops/special_chebyshev_polynomial_w_native.h>
#include <ATen/ops/special_chebyshev_polynomial_w_ops.h>
#include <ATen/ops/special_hermite_polynomial_h_native.h>
#include <ATen/ops/special_hermite_polynomial_h_ops.h>
#include <ATen/ops/special_hermite_polynomial_h_native.h>
#include <ATen/ops/special_hermite_polynomial_h_ops.h>
#include <ATen/ops/special_hermite_polynomial_h_native.h>
#include <ATen/ops/special_hermite_polynomial_h_ops.h>
#include <ATen/ops/special_hermite_polynomial_h_native.h>
#include <ATen/ops/special_hermite_polynomial_h_ops.h>
#include <ATen/ops/special_hermite_polynomial_h_native.h>
#include <ATen/ops/special_hermite_polynomial_h_ops.h>
#include <ATen/ops/special_hermite_polynomial_h_native.h>
#include <ATen/ops/special_hermite_polynomial_h_ops.h>
#include <ATen/ops/special_hermite_polynomial_he_native.h>
#include <ATen/ops/special_hermite_polynomial_he_ops.h>
#include <ATen/ops/special_hermite_polynomial_he_native.h>
#include <ATen/ops/special_hermite_polynomial_he_ops.h>
#include <ATen/ops/special_hermite_polynomial_he_native.h>
#include <ATen/ops/special_hermite_polynomial_he_ops.h>
#include <ATen/ops/special_hermite_polynomial_he_native.h>
#include <ATen/ops/special_hermite_polynomial_he_ops.h>
#include <ATen/ops/special_hermite_polynomial_he_native.h>
#include <ATen/ops/special_hermite_polynomial_he_ops.h>
#include <ATen/ops/special_hermite_polynomial_he_native.h>
#include <ATen/ops/special_hermite_polynomial_he_ops.h>
#include <ATen/ops/special_laguerre_polynomial_l_native.h>
#include <ATen/ops/special_laguerre_polynomial_l_ops.h>
#include <ATen/ops/special_laguerre_polynomial_l_native.h>
#include <ATen/ops/special_laguerre_polynomial_l_ops.h>
#include <ATen/ops/special_laguerre_polynomial_l_native.h>
#include <ATen/ops/special_laguerre_polynomial_l_ops.h>
#include <ATen/ops/special_laguerre_polynomial_l_native.h>
#include <ATen/ops/special_laguerre_polynomial_l_ops.h>
#include <ATen/ops/special_laguerre_polynomial_l_native.h>
#include <ATen/ops/special_laguerre_polynomial_l_ops.h>
#include <ATen/ops/special_laguerre_polynomial_l_native.h>
#include <ATen/ops/special_laguerre_polynomial_l_ops.h>
#include <ATen/ops/special_legendre_polynomial_p_native.h>
#include <ATen/ops/special_legendre_polynomial_p_ops.h>
#include <ATen/ops/special_legendre_polynomial_p_native.h>
#include <ATen/ops/special_legendre_polynomial_p_ops.h>
#include <ATen/ops/special_legendre_polynomial_p_native.h>
#include <ATen/ops/special_legendre_polynomial_p_ops.h>
#include <ATen/ops/special_legendre_polynomial_p_native.h>
#include <ATen/ops/special_legendre_polynomial_p_ops.h>
#include <ATen/ops/special_legendre_polynomial_p_native.h>
#include <ATen/ops/special_legendre_polynomial_p_ops.h>
#include <ATen/ops/special_legendre_polynomial_p_native.h>
#include <ATen/ops/special_legendre_polynomial_p_ops.h>
#include <ATen/ops/special_modified_bessel_i0_native.h>
#include <ATen/ops/special_modified_bessel_i0_ops.h>
#include <ATen/ops/special_modified_bessel_i0_native.h>
#include <ATen/ops/special_modified_bessel_i0_ops.h>
#include <ATen/ops/special_modified_bessel_i1_native.h>
#include <ATen/ops/special_modified_bessel_i1_ops.h>
#include <ATen/ops/special_modified_bessel_i1_native.h>
#include <ATen/ops/special_modified_bessel_i1_ops.h>
#include <ATen/ops/special_modified_bessel_k0_native.h>
#include <ATen/ops/special_modified_bessel_k0_ops.h>
#include <ATen/ops/special_modified_bessel_k0_native.h>
#include <ATen/ops/special_modified_bessel_k0_ops.h>
#include <ATen/ops/special_modified_bessel_k1_native.h>
#include <ATen/ops/special_modified_bessel_k1_ops.h>
#include <ATen/ops/special_modified_bessel_k1_native.h>
#include <ATen/ops/special_modified_bessel_k1_ops.h>
#include <ATen/ops/special_scaled_modified_bessel_k0_native.h>
#include <ATen/ops/special_scaled_modified_bessel_k0_ops.h>
#include <ATen/ops/special_scaled_modified_bessel_k0_native.h>
#include <ATen/ops/special_scaled_modified_bessel_k0_ops.h>
#include <ATen/ops/special_scaled_modified_bessel_k1_native.h>
#include <ATen/ops/special_scaled_modified_bessel_k1_ops.h>
#include <ATen/ops/special_scaled_modified_bessel_k1_native.h>
#include <ATen/ops/special_scaled_modified_bessel_k1_ops.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_t_native.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_t_ops.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_t_native.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_t_ops.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_t_native.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_t_ops.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_t_native.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_t_ops.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_t_native.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_t_ops.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_t_native.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_t_ops.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_u_native.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_u_ops.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_u_native.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_u_ops.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_u_native.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_u_ops.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_u_native.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_u_ops.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_u_native.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_u_ops.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_u_native.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_u_ops.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_v_native.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_v_ops.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_v_native.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_v_ops.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_v_native.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_v_ops.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_v_native.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_v_ops.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_v_native.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_v_ops.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_v_native.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_v_ops.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_w_native.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_w_ops.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_w_native.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_w_ops.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_w_native.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_w_ops.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_w_native.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_w_ops.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_w_native.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_w_ops.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_w_native.h>
#include <ATen/ops/special_shifted_chebyshev_polynomial_w_ops.h>
#include <ATen/ops/special_spherical_bessel_j0_native.h>
#include <ATen/ops/special_spherical_bessel_j0_ops.h>
#include <ATen/ops/special_spherical_bessel_j0_native.h>
#include <ATen/ops/special_spherical_bessel_j0_ops.h>
#include <ATen/ops/_foobar_native.h>
#include <ATen/ops/_foobar_ops.h>
#include <ATen/ops/_foobar_native.h>
#include <ATen/ops/_foobar_ops.h>
#include <ATen/ops/_fused_adam_native.h>
#include <ATen/ops/_fused_adam_ops.h>
#include <ATen/ops/_fused_adam_native.h>
#include <ATen/ops/_fused_adam_ops.h>
#include <ATen/ops/_fused_adam_native.h>
#include <ATen/ops/_fused_adam_ops.h>
#include <ATen/ops/_fused_adam_native.h>
#include <ATen/ops/_fused_adam_ops.h>
#include <ATen/ops/_fused_adam_native.h>
#include <ATen/ops/_fused_adam_ops.h>
#include <ATen/ops/_fused_adam_native.h>
#include <ATen/ops/_fused_adam_ops.h>
#include <ATen/ops/_fused_adamw_native.h>
#include <ATen/ops/_fused_adamw_ops.h>
#include <ATen/ops/_fused_adamw_native.h>
#include <ATen/ops/_fused_adamw_ops.h>
#include <ATen/ops/_fused_adamw_native.h>
#include <ATen/ops/_fused_adamw_ops.h>
#include <ATen/ops/_fused_adamw_native.h>
#include <ATen/ops/_fused_adamw_ops.h>
#include <ATen/ops/_fused_adamw_native.h>
#include <ATen/ops/_fused_adamw_ops.h>
#include <ATen/ops/_fused_adamw_native.h>
#include <ATen/ops/_fused_adamw_ops.h>
#include <ATen/ops/_fused_sgd_native.h>
#include <ATen/ops/_fused_sgd_ops.h>
#include <ATen/ops/_fused_sgd_native.h>
#include <ATen/ops/_fused_sgd_ops.h>
#include <ATen/ops/_fused_sgd_native.h>
#include <ATen/ops/_fused_sgd_ops.h>
#include <ATen/ops/_fused_sgd_native.h>
#include <ATen/ops/_fused_sgd_ops.h>
#include <ATen/ops/_fused_sgd_native.h>
#include <ATen/ops/_fused_sgd_ops.h>
#include <ATen/ops/_fused_sgd_native.h>
#include <ATen/ops/_fused_sgd_ops.h>
#include <ATen/ops/_fused_adagrad_native.h>
#include <ATen/ops/_fused_adagrad_ops.h>
#include <ATen/ops/_fused_adagrad_native.h>
#include <ATen/ops/_fused_adagrad_ops.h>
#include <ATen/ops/_fused_adagrad_native.h>
#include <ATen/ops/_fused_adagrad_ops.h>
#include <ATen/ops/_fw_primal_native.h>
#include <ATen/ops/_fw_primal_ops.h>
#include <ATen/ops/_fw_primal_copy_native.h>
#include <ATen/ops/_fw_primal_copy_ops.h>
#include <ATen/ops/_make_dual_native.h>
#include <ATen/ops/_make_dual_ops.h>
#include <ATen/ops/_make_dual_copy_native.h>
#include <ATen/ops/_make_dual_copy_ops.h>
#include <ATen/ops/_unpack_dual_native.h>
#include <ATen/ops/_unpack_dual_ops.h>
#include <ATen/ops/rename_native.h>
#include <ATen/ops/rename_ops.h>
#include <ATen/ops/align_to_native.h>
#include <ATen/ops/align_to_ops.h>
#include <ATen/ops/align_to_native.h>
#include <ATen/ops/align_to_ops.h>
#include <ATen/ops/refine_names_native.h>
#include <ATen/ops/refine_names_ops.h>
#include <ATen/ops/view_as_real_native.h>
#include <ATen/ops/view_as_real_ops.h>
#include <ATen/ops/view_as_real_copy_native.h>
#include <ATen/ops/view_as_real_copy_ops.h>
#include <ATen/ops/view_as_complex_native.h>
#include <ATen/ops/view_as_complex_ops.h>
#include <ATen/ops/view_as_complex_copy_native.h>
#include <ATen/ops/view_as_complex_copy_ops.h>
#include <ATen/ops/real_native.h>
#include <ATen/ops/real_ops.h>
#include <ATen/ops/imag_native.h>
#include <ATen/ops/imag_ops.h>
#include <ATen/ops/_conj_native.h>
#include <ATen/ops/_conj_ops.h>
#include <ATen/ops/_conj_copy_native.h>
#include <ATen/ops/_conj_copy_ops.h>
#include <ATen/ops/conj_native.h>
#include <ATen/ops/conj_ops.h>
#include <ATen/ops/resolve_conj_native.h>
#include <ATen/ops/resolve_conj_ops.h>
#include <ATen/ops/resolve_neg_native.h>
#include <ATen/ops/resolve_neg_ops.h>
#include <ATen/ops/_neg_view_native.h>
#include <ATen/ops/_neg_view_ops.h>
#include <ATen/ops/_neg_view_copy_native.h>
#include <ATen/ops/_neg_view_copy_ops.h>
#include <ATen/ops/as_strided_native.h>
#include <ATen/ops/as_strided_ops.h>
#include <ATen/ops/as_strided_copy_native.h>
#include <ATen/ops/as_strided_copy_ops.h>
#include <ATen/ops/broadcast_to_native.h>
#include <ATen/ops/broadcast_to_ops.h>
#include <ATen/ops/_sparse_broadcast_to_native.h>
#include <ATen/ops/_sparse_broadcast_to_ops.h>
#include <ATen/ops/_sparse_broadcast_to_copy_native.h>
#include <ATen/ops/_sparse_broadcast_to_copy_ops.h>
#include <ATen/ops/chunk_native.h>
#include <ATen/ops/chunk_ops.h>
#include <ATen/ops/tensor_split_native.h>
#include <ATen/ops/tensor_split_ops.h>
#include <ATen/ops/tensor_split_native.h>
#include <ATen/ops/tensor_split_ops.h>
#include <ATen/ops/tensor_split_native.h>
#include <ATen/ops/tensor_split_ops.h>
#include <ATen/ops/contiguous_native.h>
#include <ATen/ops/contiguous_ops.h>
#include <ATen/ops/diagonal_native.h>
#include <ATen/ops/diagonal_ops.h>
#include <ATen/ops/diagonal_copy_native.h>
#include <ATen/ops/diagonal_copy_ops.h>
#include <ATen/ops/linalg_diagonal_native.h>
#include <ATen/ops/linalg_diagonal_ops.h>
#include <ATen/ops/diagonal_native.h>
#include <ATen/ops/diagonal_ops.h>
#include <ATen/ops/expand_native.h>
#include <ATen/ops/expand_ops.h>
#include <ATen/ops/expand_copy_native.h>
#include <ATen/ops/expand_copy_ops.h>
#include <ATen/ops/expand_as_native.h>
#include <ATen/ops/expand_as_ops.h>
#include <ATen/ops/flatten_native.h>
#include <ATen/ops/flatten_ops.h>
#include <ATen/ops/flatten_native.h>
#include <ATen/ops/flatten_ops.h>
#include <ATen/ops/flatten_native.h>
#include <ATen/ops/flatten_ops.h>
#include <ATen/ops/flatten_native.h>
#include <ATen/ops/flatten_ops.h>
#include <ATen/ops/unflatten_native.h>
#include <ATen/ops/unflatten_ops.h>
#include <ATen/ops/unflatten_native.h>
#include <ATen/ops/unflatten_ops.h>
#include <ATen/ops/narrow_native.h>
#include <ATen/ops/narrow_ops.h>
#include <ATen/ops/narrow_copy_native.h>
#include <ATen/ops/narrow_copy_ops.h>
#include <ATen/ops/narrow_native.h>
#include <ATen/ops/narrow_ops.h>
#include <ATen/ops/permute_native.h>
#include <ATen/ops/permute_ops.h>
#include <ATen/ops/permute_copy_native.h>
#include <ATen/ops/permute_copy_ops.h>
#include <ATen/ops/movedim_native.h>
#include <ATen/ops/movedim_ops.h>
#include <ATen/ops/movedim_native.h>
#include <ATen/ops/movedim_ops.h>
#include <ATen/ops/moveaxis_native.h>
#include <ATen/ops/moveaxis_ops.h>
#include <ATen/ops/moveaxis_native.h>
#include <ATen/ops/moveaxis_ops.h>
#include <ATen/ops/numpy_T_native.h>
#include <ATen/ops/numpy_T_ops.h>
#include <ATen/ops/matrix_H_native.h>
#include <ATen/ops/matrix_H_ops.h>
#include <ATen/ops/mT_native.h>
#include <ATen/ops/mT_ops.h>
#include <ATen/ops/mH_native.h>
#include <ATen/ops/mH_ops.h>
#include <ATen/ops/adjoint_native.h>
#include <ATen/ops/adjoint_ops.h>
#include <ATen/ops/pin_memory_native.h>
#include <ATen/ops/pin_memory_ops.h>
#include <ATen/ops/ravel_native.h>
#include <ATen/ops/ravel_ops.h>
#include <ATen/ops/reshape_native.h>
#include <ATen/ops/reshape_ops.h>
#include <ATen/ops/_reshape_alias_native.h>
#include <ATen/ops/_reshape_alias_ops.h>
#include <ATen/ops/_reshape_alias_copy_native.h>
#include <ATen/ops/_reshape_alias_copy_ops.h>
#include <ATen/ops/reshape_as_native.h>
#include <ATen/ops/reshape_as_ops.h>
#include <ATen/ops/select_native.h>
#include <ATen/ops/select_ops.h>
#include <ATen/ops/select_native.h>
#include <ATen/ops/select_ops.h>
#include <ATen/ops/select_copy_native.h>
#include <ATen/ops/select_copy_ops.h>
#include <ATen/ops/detach_native.h>
#include <ATen/ops/detach_ops.h>
#include <ATen/ops/detach_copy_native.h>
#include <ATen/ops/detach_copy_ops.h>
#include <ATen/ops/slice_native.h>
#include <ATen/ops/slice_ops.h>
#include <ATen/ops/slice_copy_native.h>
#include <ATen/ops/slice_copy_ops.h>
#include <ATen/ops/slice_inverse_native.h>
#include <ATen/ops/slice_inverse_ops.h>
#include <ATen/ops/slice_scatter_native.h>
#include <ATen/ops/slice_scatter_ops.h>
#include <ATen/ops/split_native.h>
#include <ATen/ops/split_ops.h>
#include <ATen/ops/split_copy_native.h>
#include <ATen/ops/split_copy_ops.h>
#include <ATen/ops/split_native.h>
#include <ATen/ops/split_ops.h>
#include <ATen/ops/split_with_sizes_native.h>
#include <ATen/ops/split_with_sizes_ops.h>
#include <ATen/ops/split_with_sizes_copy_native.h>
#include <ATen/ops/split_with_sizes_copy_ops.h>
#include <ATen/ops/hsplit_native.h>
#include <ATen/ops/hsplit_ops.h>
#include <ATen/ops/hsplit_native.h>
#include <ATen/ops/hsplit_ops.h>
#include <ATen/ops/vsplit_native.h>
#include <ATen/ops/vsplit_ops.h>
#include <ATen/ops/vsplit_native.h>
#include <ATen/ops/vsplit_ops.h>
#include <ATen/ops/dsplit_native.h>
#include <ATen/ops/dsplit_ops.h>
#include <ATen/ops/dsplit_native.h>
#include <ATen/ops/dsplit_ops.h>
#include <ATen/ops/squeeze_native.h>
#include <ATen/ops/squeeze_ops.h>
#include <ATen/ops/squeeze_copy_native.h>
#include <ATen/ops/squeeze_copy_ops.h>
#include <ATen/ops/squeeze_native.h>
#include <ATen/ops/squeeze_ops.h>
#include <ATen/ops/squeeze_copy_native.h>
#include <ATen/ops/squeeze_copy_ops.h>
#include <ATen/ops/squeeze_native.h>
#include <ATen/ops/squeeze_ops.h>
#include <ATen/ops/squeeze_native.h>
#include <ATen/ops/squeeze_ops.h>
#include <ATen/ops/squeeze_copy_native.h>
#include <ATen/ops/squeeze_copy_ops.h>
#include <ATen/ops/t_native.h>
#include <ATen/ops/t_ops.h>
#include <ATen/ops/t_copy_native.h>
#include <ATen/ops/t_copy_ops.h>
#include <ATen/ops/transpose_native.h>
#include <ATen/ops/transpose_ops.h>
#include <ATen/ops/transpose_copy_native.h>
#include <ATen/ops/transpose_copy_ops.h>
#include <ATen/ops/transpose_native.h>
#include <ATen/ops/transpose_ops.h>
#include <ATen/ops/_nested_view_from_buffer_native.h>
#include <ATen/ops/_nested_view_from_buffer_ops.h>
#include <ATen/ops/_nested_view_from_buffer_copy_native.h>
#include <ATen/ops/_nested_view_from_buffer_copy_ops.h>
#include <ATen/ops/_nested_view_from_jagged_native.h>
#include <ATen/ops/_nested_view_from_jagged_ops.h>
#include <ATen/ops/_nested_view_from_jagged_copy_native.h>
#include <ATen/ops/_nested_view_from_jagged_copy_ops.h>
#include <ATen/ops/_nested_get_values_native.h>
#include <ATen/ops/_nested_get_values_ops.h>
#include <ATen/ops/_nested_get_values_copy_native.h>
#include <ATen/ops/_nested_get_values_copy_ops.h>
#include <ATen/ops/unsqueeze_native.h>
#include <ATen/ops/unsqueeze_ops.h>
#include <ATen/ops/unsqueeze_copy_native.h>
#include <ATen/ops/unsqueeze_copy_ops.h>
#include <ATen/ops/view_as_native.h>
#include <ATen/ops/view_as_ops.h>
#include <ATen/ops/positive_native.h>
#include <ATen/ops/positive_ops.h>
#include <ATen/ops/coalesce_native.h>
#include <ATen/ops/coalesce_ops.h>
#include <ATen/ops/_indices_native.h>
#include <ATen/ops/_indices_ops.h>
#include <ATen/ops/_indices_copy_native.h>
#include <ATen/ops/_indices_copy_ops.h>
#include <ATen/ops/_values_native.h>
#include <ATen/ops/_values_ops.h>
#include <ATen/ops/_values_copy_native.h>
#include <ATen/ops/_values_copy_ops.h>
#include <ATen/ops/indices_native.h>
#include <ATen/ops/indices_ops.h>
#include <ATen/ops/indices_copy_native.h>
#include <ATen/ops/indices_copy_ops.h>
#include <ATen/ops/values_native.h>
#include <ATen/ops/values_ops.h>
#include <ATen/ops/values_copy_native.h>
#include <ATen/ops/values_copy_ops.h>
#include <ATen/ops/crow_indices_native.h>
#include <ATen/ops/crow_indices_ops.h>
#include <ATen/ops/crow_indices_copy_native.h>
#include <ATen/ops/crow_indices_copy_ops.h>
#include <ATen/ops/col_indices_native.h>
#include <ATen/ops/col_indices_ops.h>
#include <ATen/ops/col_indices_copy_native.h>
#include <ATen/ops/col_indices_copy_ops.h>
#include <ATen/ops/ccol_indices_native.h>
#include <ATen/ops/ccol_indices_ops.h>
#include <ATen/ops/ccol_indices_copy_native.h>
#include <ATen/ops/ccol_indices_copy_ops.h>
#include <ATen/ops/row_indices_native.h>
#include <ATen/ops/row_indices_ops.h>
#include <ATen/ops/row_indices_copy_native.h>
#include <ATen/ops/row_indices_copy_ops.h>
#include <ATen/ops/unbind_native.h>
#include <ATen/ops/unbind_ops.h>
#include <ATen/ops/unbind_copy_native.h>
#include <ATen/ops/unbind_copy_ops.h>
#include <ATen/ops/unbind_native.h>
#include <ATen/ops/unbind_ops.h>
#include <ATen/ops/_autocast_to_reduced_precision_native.h>
#include <ATen/ops/_autocast_to_reduced_precision_ops.h>
#include <ATen/ops/_autocast_to_full_precision_native.h>
#include <ATen/ops/_autocast_to_full_precision_ops.h>
#include <ATen/ops/to_native.h>
#include <ATen/ops/to_ops.h>
#include <ATen/ops/to_native.h>
#include <ATen/ops/to_ops.h>
#include <ATen/ops/to_native.h>
#include <ATen/ops/to_ops.h>
#include <ATen/ops/to_native.h>
#include <ATen/ops/to_ops.h>
#include <ATen/ops/lift_fresh_native.h>
#include <ATen/ops/lift_fresh_ops.h>
#include <ATen/ops/lift_fresh_copy_native.h>
#include <ATen/ops/lift_fresh_copy_ops.h>
#include <ATen/ops/view_native.h>
#include <ATen/ops/view_ops.h>
#include <ATen/ops/view_copy_native.h>
#include <ATen/ops/view_copy_ops.h>
#include <ATen/ops/view_native.h>
#include <ATen/ops/view_ops.h>
#include <ATen/ops/view_copy_native.h>
#include <ATen/ops/view_copy_ops.h>
#include <ATen/ops/swapaxes_native.h>
#include <ATen/ops/swapaxes_ops.h>
#include <ATen/ops/swapdims_native.h>
#include <ATen/ops/swapdims_ops.h>
#include <ATen/ops/unfold_native.h>
#include <ATen/ops/unfold_ops.h>
#include <ATen/ops/unfold_copy_native.h>
#include <ATen/ops/unfold_copy_ops.h>
#include <ATen/ops/alias_native.h>
#include <ATen/ops/alias_ops.h>
#include <ATen/ops/alias_copy_native.h>
#include <ATen/ops/alias_copy_ops.h>
#include <ATen/ops/_test_autograd_multiple_dispatch_view_native.h>
#include <ATen/ops/_test_autograd_multiple_dispatch_view_ops.h>
#include <ATen/ops/_test_autograd_multiple_dispatch_view_copy_native.h>
#include <ATen/ops/_test_autograd_multiple_dispatch_view_copy_ops.h>
#include <ATen/ops/_cast_Byte_native.h>
#include <ATen/ops/_cast_Byte_ops.h>
#include <ATen/ops/_cast_Char_native.h>
#include <ATen/ops/_cast_Char_ops.h>
#include <ATen/ops/_cast_Double_native.h>
#include <ATen/ops/_cast_Double_ops.h>
#include <ATen/ops/_cast_Float_native.h>
#include <ATen/ops/_cast_Float_ops.h>
#include <ATen/ops/_cast_Int_native.h>
#include <ATen/ops/_cast_Int_ops.h>
#include <ATen/ops/_cast_Long_native.h>
#include <ATen/ops/_cast_Long_ops.h>
#include <ATen/ops/_cast_Short_native.h>
#include <ATen/ops/_cast_Short_ops.h>
#include <ATen/ops/_cast_Half_native.h>
#include <ATen/ops/_cast_Half_ops.h>
#include <ATen/ops/_backward_native.h>
#include <ATen/ops/_backward_ops.h>
#include <ATen/ops/set_data_native.h>
#include <ATen/ops/set_data_ops.h>
#include <ATen/ops/data_native.h>
#include <ATen/ops/data_ops.h>
#include <ATen/ops/is_leaf_native.h>
#include <ATen/ops/is_leaf_ops.h>
#include <ATen/ops/output_nr_native.h>
#include <ATen/ops/output_nr_ops.h>
#include <ATen/ops/_version_native.h>
#include <ATen/ops/_version_ops.h>
#include <ATen/ops/requires_grad_native.h>
#include <ATen/ops/requires_grad_ops.h>
#include <ATen/ops/retain_grad_native.h>
#include <ATen/ops/retain_grad_ops.h>
#include <ATen/ops/retains_grad_native.h>
#include <ATen/ops/retains_grad_ops.h>
#include <ATen/ops/_has_same_storage_numel_native.h>
#include <ATen/ops/_has_same_storage_numel_ops.h>
#include <ATen/ops/align_as_native.h>
#include <ATen/ops/align_as_ops.h>
#include <ATen/ops/align_tensors_native.h>
#include <ATen/ops/align_tensors_ops.h>
#include <ATen/ops/_assert_async_native.h>
#include <ATen/ops/_assert_async_ops.h>
#include <ATen/ops/_assert_async_native.h>
#include <ATen/ops/_assert_async_ops.h>
#include <ATen/ops/_assert_scalar_native.h>
#include <ATen/ops/_assert_scalar_ops.h>
#include <ATen/ops/_functional_assert_scalar_native.h>
#include <ATen/ops/_functional_assert_scalar_ops.h>
#include <ATen/ops/_functional_assert_async_native.h>
#include <ATen/ops/_functional_assert_async_ops.h>
#include <ATen/ops/_assert_tensor_metadata_native.h>
#include <ATen/ops/_assert_tensor_metadata_ops.h>
#include <ATen/ops/_print_native.h>
#include <ATen/ops/_print_ops.h>
#include <ATen/ops/sym_constrain_range_native.h>
#include <ATen/ops/sym_constrain_range_ops.h>
#include <ATen/ops/sym_constrain_range_for_size_native.h>
#include <ATen/ops/sym_constrain_range_for_size_ops.h>
#include <ATen/ops/_functional_sym_constrain_range_native.h>
#include <ATen/ops/_functional_sym_constrain_range_ops.h>
#include <ATen/ops/_functional_sym_constrain_range_for_size_native.h>
#include <ATen/ops/_functional_sym_constrain_range_for_size_ops.h>
#include <ATen/ops/_make_dep_token_native.h>
#include <ATen/ops/_make_dep_token_ops.h>
#include <ATen/ops/_use_cudnn_ctc_loss_native.h>
#include <ATen/ops/_use_cudnn_ctc_loss_ops.h>
#include <ATen/ops/_use_cudnn_ctc_loss_native.h>
#include <ATen/ops/_use_cudnn_ctc_loss_ops.h>
#include <ATen/ops/_cudnn_ctc_loss_native.h>
#include <ATen/ops/_cudnn_ctc_loss_ops.h>
#include <ATen/ops/_use_cudnn_rnn_flatten_weight_native.h>
#include <ATen/ops/_use_cudnn_rnn_flatten_weight_ops.h>
#include <ATen/ops/_debug_has_internal_overlap_native.h>
#include <ATen/ops/_debug_has_internal_overlap_ops.h>
#include <ATen/ops/_sobol_engine_draw_native.h>
#include <ATen/ops/_sobol_engine_draw_ops.h>
#include <ATen/ops/_sobol_engine_ff_native.h>
#include <ATen/ops/_sobol_engine_ff_ops.h>
#include <ATen/ops/_sobol_engine_scramble_native.h>
#include <ATen/ops/_sobol_engine_scramble_ops.h>
#include <ATen/ops/_sobol_engine_initialize_state_native.h>
#include <ATen/ops/_sobol_engine_initialize_state_ops.h>
#include <ATen/ops/_reshape_from_tensor_native.h>
#include <ATen/ops/_reshape_from_tensor_ops.h>
#include <ATen/ops/_shape_as_tensor_native.h>
#include <ATen/ops/_shape_as_tensor_ops.h>
#include <ATen/ops/dropout_native.h>
#include <ATen/ops/dropout_ops.h>
#include <ATen/ops/dropout_native.h>
#include <ATen/ops/dropout_ops.h>
#include <ATen/ops/feature_dropout_native.h>
#include <ATen/ops/feature_dropout_ops.h>
#include <ATen/ops/feature_dropout_native.h>
#include <ATen/ops/feature_dropout_ops.h>
#include <ATen/ops/alpha_dropout_native.h>
#include <ATen/ops/alpha_dropout_ops.h>
#include <ATen/ops/alpha_dropout_native.h>
#include <ATen/ops/alpha_dropout_ops.h>
#include <ATen/ops/feature_alpha_dropout_native.h>
#include <ATen/ops/feature_alpha_dropout_ops.h>
#include <ATen/ops/feature_alpha_dropout_native.h>
#include <ATen/ops/feature_alpha_dropout_ops.h>
#include <ATen/ops/chalf_native.h>
#include <ATen/ops/chalf_ops.h>
#include <ATen/ops/adaptive_max_pool1d_native.h>
#include <ATen/ops/adaptive_max_pool1d_ops.h>
#include <ATen/ops/affine_grid_generator_backward_native.h>
#include <ATen/ops/affine_grid_generator_backward_ops.h>
#include <ATen/ops/_is_all_true_native.h>
#include <ATen/ops/_is_all_true_ops.h>
#include <ATen/ops/_is_any_true_native.h>
#include <ATen/ops/_is_any_true_ops.h>
#include <ATen/ops/_test_check_tensor_native.h>
#include <ATen/ops/_test_check_tensor_ops.h>
#include <ATen/ops/allclose_native.h>
#include <ATen/ops/allclose_ops.h>
#include <ATen/ops/arange_native.h>
#include <ATen/ops/arange_ops.h>
#include <ATen/ops/_dim_arange_native.h>
#include <ATen/ops/_dim_arange_ops.h>
#include <ATen/ops/atleast_1d_native.h>
#include <ATen/ops/atleast_1d_ops.h>
#include <ATen/ops/atleast_1d_native.h>
#include <ATen/ops/atleast_1d_ops.h>
#include <ATen/ops/atleast_2d_native.h>
#include <ATen/ops/atleast_2d_ops.h>
#include <ATen/ops/atleast_2d_native.h>
#include <ATen/ops/atleast_2d_ops.h>
#include <ATen/ops/atleast_3d_native.h>
#include <ATen/ops/atleast_3d_ops.h>
#include <ATen/ops/atleast_3d_native.h>
#include <ATen/ops/atleast_3d_ops.h>
#include <ATen/ops/batch_norm_native.h>
#include <ATen/ops/batch_norm_ops.h>
#include <ATen/ops/_batch_norm_impl_index_native.h>
#include <ATen/ops/_batch_norm_impl_index_ops.h>
#include <ATen/ops/_batch_norm_impl_index_backward_native.h>
#include <ATen/ops/_batch_norm_impl_index_backward_ops.h>
#include <ATen/ops/bilinear_native.h>
#include <ATen/ops/bilinear_ops.h>
#include <ATen/ops/_lazy_clone_native.h>
#include <ATen/ops/_lazy_clone_ops.h>
#include <ATen/ops/broadcast_tensors_native.h>
#include <ATen/ops/broadcast_tensors_ops.h>
#include <ATen/ops/unsafe_chunk_native.h>
#include <ATen/ops/unsafe_chunk_ops.h>
#include <ATen/ops/cudnn_is_acceptable_native.h>
#include <ATen/ops/cudnn_is_acceptable_ops.h>
#include <ATen/ops/_convolution_native.h>
#include <ATen/ops/_convolution_ops.h>
#include <ATen/ops/_convolution_mode_native.h>
#include <ATen/ops/_convolution_mode_ops.h>
#include <ATen/ops/_convolution_double_backward_native.h>
#include <ATen/ops/_convolution_double_backward_ops.h>
#include <ATen/ops/conv1d_native.h>
#include <ATen/ops/conv1d_ops.h>
#include <ATen/ops/conv2d_native.h>
#include <ATen/ops/conv2d_ops.h>
#include <ATen/ops/conv3d_native.h>
#include <ATen/ops/conv3d_ops.h>
#include <ATen/ops/conv1d_native.h>
#include <ATen/ops/conv1d_ops.h>
#include <ATen/ops/conv2d_native.h>
#include <ATen/ops/conv2d_ops.h>
#include <ATen/ops/conv3d_native.h>
#include <ATen/ops/conv3d_ops.h>
#include <ATen/ops/conv_tbc_backward_native.h>
#include <ATen/ops/conv_tbc_backward_ops.h>
#include <ATen/ops/conv_transpose1d_native.h>
#include <ATen/ops/conv_transpose1d_ops.h>
#include <ATen/ops/conv_transpose2d_native.h>
#include <ATen/ops/conv_transpose2d_ops.h>
#include <ATen/ops/conv_transpose3d_native.h>
#include <ATen/ops/conv_transpose3d_ops.h>
#include <ATen/ops/cosine_embedding_loss_native.h>
#include <ATen/ops/cosine_embedding_loss_ops.h>
#include <ATen/ops/cov_native.h>
#include <ATen/ops/cov_ops.h>
#include <ATen/ops/corrcoef_native.h>
#include <ATen/ops/corrcoef_ops.h>
#include <ATen/ops/_cummax_helper_native.h>
#include <ATen/ops/_cummax_helper_ops.h>
#include <ATen/ops/_cummin_helper_native.h>
#include <ATen/ops/_cummin_helper_ops.h>
#include <ATen/ops/cummaxmin_backward_native.h>
#include <ATen/ops/cummaxmin_backward_ops.h>
#include <ATen/ops/cumprod_backward_native.h>
#include <ATen/ops/cumprod_backward_ops.h>
#include <ATen/ops/cumulative_trapezoid_native.h>
#include <ATen/ops/cumulative_trapezoid_ops.h>
#include <ATen/ops/cumulative_trapezoid_native.h>
#include <ATen/ops/cumulative_trapezoid_ops.h>
#include <ATen/ops/ctc_loss_native.h>
#include <ATen/ops/ctc_loss_ops.h>
#include <ATen/ops/ctc_loss_native.h>
#include <ATen/ops/ctc_loss_ops.h>
#include <ATen/ops/_ctc_loss_backward_native.h>
#include <ATen/ops/_ctc_loss_backward_ops.h>
#include <ATen/ops/diagflat_native.h>
#include <ATen/ops/diagflat_ops.h>
#include <ATen/ops/fill_diagonal_native.h>
#include <ATen/ops/fill_diagonal_ops.h>
#include <ATen/ops/gradient_native.h>
#include <ATen/ops/gradient_ops.h>
#include <ATen/ops/gradient_native.h>
#include <ATen/ops/gradient_ops.h>
#include <ATen/ops/gradient_native.h>
#include <ATen/ops/gradient_ops.h>
#include <ATen/ops/gradient_native.h>
#include <ATen/ops/gradient_ops.h>
#include <ATen/ops/gradient_native.h>
#include <ATen/ops/gradient_ops.h>
#include <ATen/ops/gradient_native.h>
#include <ATen/ops/gradient_ops.h>
#include <ATen/ops/gradient_native.h>
#include <ATen/ops/gradient_ops.h>
#include <ATen/ops/divide_native.h>
#include <ATen/ops/divide_ops.h>
#include <ATen/ops/divide_native.h>
#include <ATen/ops/divide_ops.h>
#include <ATen/ops/divide_native.h>
#include <ATen/ops/divide_ops.h>
#include <ATen/ops/divide_native.h>
#include <ATen/ops/divide_ops.h>
#include <ATen/ops/true_divide_native.h>
#include <ATen/ops/true_divide_ops.h>
#include <ATen/ops/true_divide_native.h>
#include <ATen/ops/true_divide_ops.h>
#include <ATen/ops/einsum_native.h>
#include <ATen/ops/einsum_ops.h>
#include <ATen/ops/embedding_backward_native.h>
#include <ATen/ops/embedding_backward_ops.h>
#include <ATen/ops/embedding_sparse_backward_native.h>
#include <ATen/ops/embedding_sparse_backward_ops.h>
#include <ATen/ops/_rowwise_prune_native.h>
#include <ATen/ops/_rowwise_prune_ops.h>
#include <ATen/ops/embedding_bag_native.h>
#include <ATen/ops/embedding_bag_ops.h>
#include <ATen/ops/embedding_bag_native.h>
#include <ATen/ops/embedding_bag_ops.h>
#include <ATen/ops/_embedding_bag_backward_native.h>
#include <ATen/ops/_embedding_bag_backward_ops.h>
#include <ATen/ops/_embedding_bag_sparse_backward_native.h>
#include <ATen/ops/_embedding_bag_sparse_backward_ops.h>
#include <ATen/ops/grid_sampler_native.h>
#include <ATen/ops/grid_sampler_ops.h>
#include <ATen/ops/_grid_sampler_2d_cpu_fallback_backward_native.h>
#include <ATen/ops/_grid_sampler_2d_cpu_fallback_backward_ops.h>
#include <ATen/ops/hinge_embedding_loss_native.h>
#include <ATen/ops/hinge_embedding_loss_ops.h>
#include <ATen/ops/group_norm_native.h>
#include <ATen/ops/group_norm_ops.h>
#include <ATen/ops/_validate_compressed_sparse_indices_native.h>
#include <ATen/ops/_validate_compressed_sparse_indices_ops.h>
#include <ATen/ops/_cufft_get_plan_cache_size_native.h>
#include <ATen/ops/_cufft_get_plan_cache_size_ops.h>
#include <ATen/ops/_cufft_get_plan_cache_max_size_native.h>
#include <ATen/ops/_cufft_get_plan_cache_max_size_ops.h>
#include <ATen/ops/_cufft_set_plan_cache_max_size_native.h>
#include <ATen/ops/_cufft_set_plan_cache_max_size_ops.h>
#include <ATen/ops/_cufft_clear_plan_cache_native.h>
#include <ATen/ops/_cufft_clear_plan_cache_ops.h>
#include <ATen/ops/_unsafe_index_native.h>
#include <ATen/ops/_unsafe_index_ops.h>
#include <ATen/ops/_unsafe_masked_index_native.h>
#include <ATen/ops/_unsafe_masked_index_ops.h>
#include <ATen/ops/_unsafe_masked_index_put_accumulate_native.h>
#include <ATen/ops/_unsafe_masked_index_put_accumulate_ops.h>
#include <ATen/ops/index_copy_native.h>
#include <ATen/ops/index_copy_ops.h>
#include <ATen/ops/index_copy_native.h>
#include <ATen/ops/index_copy_ops.h>
#include <ATen/ops/_unsafe_index_put_native.h>
#include <ATen/ops/_unsafe_index_put_ops.h>
#include <ATen/ops/instance_norm_native.h>
#include <ATen/ops/instance_norm_ops.h>
#include <ATen/ops/isclose_native.h>
#include <ATen/ops/isclose_ops.h>
#include <ATen/ops/is_distributed_native.h>
#include <ATen/ops/is_distributed_ops.h>
#include <ATen/ops/is_floating_point_native.h>
#include <ATen/ops/is_floating_point_ops.h>
#include <ATen/ops/is_complex_native.h>
#include <ATen/ops/is_complex_ops.h>
#include <ATen/ops/is_conj_native.h>
#include <ATen/ops/is_conj_ops.h>
#include <ATen/ops/_is_zerotensor_native.h>
#include <ATen/ops/_is_zerotensor_ops.h>
#include <ATen/ops/is_neg_native.h>
#include <ATen/ops/is_neg_ops.h>
#include <ATen/ops/isreal_native.h>
#include <ATen/ops/isreal_ops.h>
#include <ATen/ops/is_nonzero_native.h>
#include <ATen/ops/is_nonzero_ops.h>
#include <ATen/ops/is_same_size_native.h>
#include <ATen/ops/is_same_size_ops.h>
#include <ATen/ops/is_signed_native.h>
#include <ATen/ops/is_signed_ops.h>
#include <ATen/ops/is_inference_native.h>
#include <ATen/ops/is_inference_ops.h>
#include <ATen/ops/kl_div_native.h>
#include <ATen/ops/kl_div_ops.h>
#include <ATen/ops/layer_norm_native.h>
#include <ATen/ops/layer_norm_ops.h>
#include <ATen/ops/rms_norm_native.h>
#include <ATen/ops/rms_norm_ops.h>
#include <ATen/ops/_cslt_compress_native.h>
#include <ATen/ops/_cslt_compress_ops.h>
#include <ATen/ops/_cslt_sparse_mm_native.h>
#include <ATen/ops/_cslt_sparse_mm_ops.h>
#include <ATen/ops/_cslt_sparse_mm_search_native.h>
#include <ATen/ops/_cslt_sparse_mm_search_ops.h>
#include <ATen/ops/_sparse_semi_structured_tile_native.h>
#include <ATen/ops/_sparse_semi_structured_tile_ops.h>
#include <ATen/ops/_sparse_semi_structured_apply_native.h>
#include <ATen/ops/_sparse_semi_structured_apply_ops.h>
#include <ATen/ops/_sparse_semi_structured_apply_dense_native.h>
#include <ATen/ops/_sparse_semi_structured_apply_dense_ops.h>
#include <ATen/ops/_sparse_semi_structured_linear_native.h>
#include <ATen/ops/_sparse_semi_structured_linear_ops.h>
#include <ATen/ops/_sparse_semi_structured_mm_native.h>
#include <ATen/ops/_sparse_semi_structured_mm_ops.h>
#include <ATen/ops/_sparse_semi_structured_addmm_native.h>
#include <ATen/ops/_sparse_semi_structured_addmm_ops.h>
#include <ATen/ops/_mixed_dtypes_linear_native.h>
#include <ATen/ops/_mixed_dtypes_linear_ops.h>
#include <ATen/ops/fbgemm_linear_int8_weight_fp32_activation_native.h>
#include <ATen/ops/fbgemm_linear_int8_weight_fp32_activation_ops.h>
#include <ATen/ops/fbgemm_linear_int8_weight_native.h>
#include <ATen/ops/fbgemm_linear_int8_weight_ops.h>
#include <ATen/ops/fbgemm_linear_quantize_weight_native.h>
#include <ATen/ops/fbgemm_linear_quantize_weight_ops.h>
#include <ATen/ops/fbgemm_pack_gemm_matrix_fp16_native.h>
#include <ATen/ops/fbgemm_pack_gemm_matrix_fp16_ops.h>
#include <ATen/ops/_wrapped_linear_prepack_native.h>
#include <ATen/ops/_wrapped_linear_prepack_ops.h>
#include <ATen/ops/_wrapped_quantized_linear_prepacked_native.h>
#include <ATen/ops/_wrapped_quantized_linear_prepacked_ops.h>
#include <ATen/ops/fbgemm_linear_fp16_weight_fp32_activation_native.h>
#include <ATen/ops/fbgemm_linear_fp16_weight_fp32_activation_ops.h>
#include <ATen/ops/fbgemm_linear_fp16_weight_native.h>
#include <ATen/ops/fbgemm_linear_fp16_weight_ops.h>
#include <ATen/ops/fbgemm_pack_quantized_matrix_native.h>
#include <ATen/ops/fbgemm_pack_quantized_matrix_ops.h>
#include <ATen/ops/fbgemm_pack_quantized_matrix_native.h>
#include <ATen/ops/fbgemm_pack_quantized_matrix_ops.h>
#include <ATen/ops/log_softmax_native.h>
#include <ATen/ops/log_softmax_ops.h>
#include <ATen/ops/margin_ranking_loss_native.h>
#include <ATen/ops/margin_ranking_loss_ops.h>
#include <ATen/ops/matrix_exp_native.h>
#include <ATen/ops/matrix_exp_ops.h>
#include <ATen/ops/matrix_exp_backward_native.h>
#include <ATen/ops/matrix_exp_backward_ops.h>
#include <ATen/ops/value_selecting_reduction_backward_native.h>
#include <ATen/ops/value_selecting_reduction_backward_ops.h>
#include <ATen/ops/max_pool1d_with_indices_native.h>
#include <ATen/ops/max_pool1d_with_indices_ops.h>
#include <ATen/ops/max_pool1d_native.h>
#include <ATen/ops/max_pool1d_ops.h>
#include <ATen/ops/max_pool2d_native.h>
#include <ATen/ops/max_pool2d_ops.h>
#include <ATen/ops/max_pool3d_native.h>
#include <ATen/ops/max_pool3d_ops.h>
#include <ATen/ops/miopen_convolution_relu_native.h>
#include <ATen/ops/miopen_convolution_relu_ops.h>
#include <ATen/ops/miopen_convolution_add_relu_native.h>
#include <ATen/ops/miopen_convolution_add_relu_ops.h>
#include <ATen/ops/_convert_weight_to_int4pack_native.h>
#include <ATen/ops/_convert_weight_to_int4pack_ops.h>
#include <ATen/ops/_weight_int4pack_mm_native.h>
#include <ATen/ops/_weight_int4pack_mm_ops.h>
#include <ATen/ops/_convert_weight_to_int4pack_for_cpu_native.h>
#include <ATen/ops/_convert_weight_to_int4pack_for_cpu_ops.h>
#include <ATen/ops/_weight_int4pack_mm_for_cpu_native.h>
#include <ATen/ops/_weight_int4pack_mm_for_cpu_ops.h>
#include <ATen/ops/_dyn_quant_pack_4bit_weight_native.h>
#include <ATen/ops/_dyn_quant_pack_4bit_weight_ops.h>
#include <ATen/ops/_dyn_quant_matmul_4bit_native.h>
#include <ATen/ops/_dyn_quant_matmul_4bit_ops.h>
#include <ATen/ops/_weight_int8pack_mm_native.h>
#include <ATen/ops/_weight_int8pack_mm_ops.h>
#include <ATen/ops/_sparse_mm_native.h>
#include <ATen/ops/_sparse_mm_ops.h>
#include <ATen/ops/_sparse_mm_native.h>
#include <ATen/ops/_sparse_mm_ops.h>
#include <ATen/ops/multiply_native.h>
#include <ATen/ops/multiply_ops.h>
#include <ATen/ops/multiply_native.h>
#include <ATen/ops/multiply_ops.h>
#include <ATen/ops/is_vulkan_available_native.h>
#include <ATen/ops/is_vulkan_available_ops.h>
#include <ATen/ops/_nnpack_available_native.h>
#include <ATen/ops/_nnpack_available_ops.h>
#include <ATen/ops/pairwise_distance_native.h>
#include <ATen/ops/pairwise_distance_ops.h>
#include <ATen/ops/cdist_native.h>
#include <ATen/ops/cdist_ops.h>
#include <ATen/ops/pdist_native.h>
#include <ATen/ops/pdist_ops.h>
#include <ATen/ops/cosine_similarity_native.h>
#include <ATen/ops/cosine_similarity_ops.h>
#include <ATen/ops/native_channel_shuffle_native.h>
#include <ATen/ops/native_channel_shuffle_ops.h>
#include <ATen/ops/is_pinned_native.h>
#include <ATen/ops/is_pinned_ops.h>
#include <ATen/ops/pinverse_native.h>
#include <ATen/ops/pinverse_ops.h>
#include <ATen/ops/poisson_nll_loss_native.h>
#include <ATen/ops/poisson_nll_loss_ops.h>
#include <ATen/ops/repeat_interleave_native.h>
#include <ATen/ops/repeat_interleave_ops.h>
#include <ATen/ops/repeat_interleave_native.h>
#include <ATen/ops/repeat_interleave_ops.h>
#include <ATen/ops/_reshape_copy_native.h>
#include <ATen/ops/_reshape_copy_ops.h>
#include <ATen/ops/rrelu_native.h>
#include <ATen/ops/rrelu_ops.h>
#include <ATen/ops/rrelu_native.h>
#include <ATen/ops/rrelu_ops.h>
#include <ATen/ops/relu6_native.h>
#include <ATen/ops/relu6_ops.h>
#include <ATen/ops/relu6_native.h>
#include <ATen/ops/relu6_ops.h>
#include <ATen/ops/prelu_native.h>
#include <ATen/ops/prelu_ops.h>
#include <ATen/ops/_prelu_kernel_native.h>
#include <ATen/ops/_prelu_kernel_ops.h>
#include <ATen/ops/_prelu_kernel_backward_native.h>
#include <ATen/ops/_prelu_kernel_backward_ops.h>
#include <ATen/ops/infinitely_differentiable_gelu_backward_native.h>
#include <ATen/ops/infinitely_differentiable_gelu_backward_ops.h>
#include <ATen/ops/_nested_select_backward_native.h>
#include <ATen/ops/_nested_select_backward_ops.h>
#include <ATen/ops/selu_native.h>
#include <ATen/ops/selu_ops.h>
#include <ATen/ops/selu_native.h>
#include <ATen/ops/selu_ops.h>
#include <ATen/ops/mish_backward_native.h>
#include <ATen/ops/mish_backward_ops.h>
#include <ATen/ops/size_native.h>
#include <ATen/ops/size_ops.h>
#include <ATen/ops/size_native.h>
#include <ATen/ops/size_ops.h>
#include <ATen/ops/sym_size_native.h>
#include <ATen/ops/sym_size_ops.h>
#include <ATen/ops/sym_numel_native.h>
#include <ATen/ops/sym_numel_ops.h>
#include <ATen/ops/sym_storage_offset_native.h>
#include <ATen/ops/sym_storage_offset_ops.h>
#include <ATen/ops/smm_native.h>
#include <ATen/ops/smm_ops.h>
#include <ATen/ops/softmax_native.h>
#include <ATen/ops/softmax_ops.h>
#include <ATen/ops/stft_native.h>
#include <ATen/ops/stft_ops.h>
#include <ATen/ops/stft_native.h>
#include <ATen/ops/stft_ops.h>
#include <ATen/ops/istft_native.h>
#include <ATen/ops/istft_ops.h>
#include <ATen/ops/stride_native.h>
#include <ATen/ops/stride_ops.h>
#include <ATen/ops/stride_native.h>
#include <ATen/ops/stride_ops.h>
#include <ATen/ops/sym_stride_native.h>
#include <ATen/ops/sym_stride_ops.h>
#include <ATen/ops/_nested_sum_backward_native.h>
#include <ATen/ops/_nested_sum_backward_ops.h>
#include <ATen/ops/sum_to_size_native.h>
#include <ATen/ops/sum_to_size_ops.h>
#include <ATen/ops/std_native.h>
#include <ATen/ops/std_ops.h>
#include <ATen/ops/std_mean_native.h>
#include <ATen/ops/std_mean_ops.h>
#include <ATen/ops/std_mean_native.h>
#include <ATen/ops/std_mean_ops.h>
#include <ATen/ops/std_mean_native.h>
#include <ATen/ops/std_mean_ops.h>
#include <ATen/ops/std_mean_native.h>
#include <ATen/ops/std_mean_ops.h>
#include <ATen/ops/tile_native.h>
#include <ATen/ops/tile_ops.h>
#include <ATen/ops/one_hot_native.h>
#include <ATen/ops/one_hot_ops.h>
#include <ATen/ops/fliplr_native.h>
#include <ATen/ops/fliplr_ops.h>
#include <ATen/ops/flipud_native.h>
#include <ATen/ops/flipud_ops.h>
#include <ATen/ops/trapezoid_native.h>
#include <ATen/ops/trapezoid_ops.h>
#include <ATen/ops/trapezoid_native.h>
#include <ATen/ops/trapezoid_ops.h>
#include <ATen/ops/trapz_native.h>
#include <ATen/ops/trapz_ops.h>
#include <ATen/ops/trapz_native.h>
#include <ATen/ops/trapz_ops.h>
#include <ATen/ops/_nested_tensor_from_mask_left_aligned_native.h>
#include <ATen/ops/_nested_tensor_from_mask_left_aligned_ops.h>
#include <ATen/ops/_nested_get_offsets_native.h>
#include <ATen/ops/_nested_get_offsets_ops.h>
#include <ATen/ops/_nested_get_lengths_native.h>
#include <ATen/ops/_nested_get_lengths_ops.h>
#include <ATen/ops/_nested_get_ragged_idx_native.h>
#include <ATen/ops/_nested_get_ragged_idx_ops.h>
#include <ATen/ops/_nested_get_min_seqlen_native.h>
#include <ATen/ops/_nested_get_min_seqlen_ops.h>
#include <ATen/ops/_nested_get_max_seqlen_native.h>
#include <ATen/ops/_nested_get_max_seqlen_ops.h>
#include <ATen/ops/_nested_get_jagged_dummy_native.h>
#include <ATen/ops/_nested_get_jagged_dummy_ops.h>
#include <ATen/ops/_nested_compute_contiguous_strides_offsets_native.h>
#include <ATen/ops/_nested_compute_contiguous_strides_offsets_ops.h>
#include <ATen/ops/triplet_margin_loss_native.h>
#include <ATen/ops/triplet_margin_loss_ops.h>
#include <ATen/ops/type_as_native.h>
#include <ATen/ops/type_as_ops.h>
#include <ATen/ops/_has_compatible_shallow_copy_type_native.h>
#include <ATen/ops/_has_compatible_shallow_copy_type_ops.h>
#include <ATen/ops/vander_native.h>
#include <ATen/ops/vander_ops.h>
#include <ATen/ops/var_native.h>
#include <ATen/ops/var_ops.h>
#include <ATen/ops/var_mean_native.h>
#include <ATen/ops/var_mean_ops.h>
#include <ATen/ops/var_mean_native.h>
#include <ATen/ops/var_mean_ops.h>
#include <ATen/ops/var_mean_native.h>
#include <ATen/ops/var_mean_ops.h>
#include <ATen/ops/var_mean_native.h>
#include <ATen/ops/var_mean_ops.h>
#include <ATen/ops/where_native.h>
#include <ATen/ops/where_ops.h>
#include <ATen/ops/where_native.h>
#include <ATen/ops/where_ops.h>
#include <ATen/ops/where_native.h>
#include <ATen/ops/where_ops.h>
#include <ATen/ops/where_native.h>
#include <ATen/ops/where_ops.h>
#include <ATen/ops/norm_except_dim_native.h>
#include <ATen/ops/norm_except_dim_ops.h>
#include <ATen/ops/_weight_norm_native.h>
#include <ATen/ops/_weight_norm_ops.h>
#include <ATen/ops/_weight_norm_differentiable_backward_native.h>
#include <ATen/ops/_weight_norm_differentiable_backward_ops.h>
#include <ATen/ops/batch_norm_backward_native.h>
#include <ATen/ops/batch_norm_backward_ops.h>
#include <ATen/ops/_sparse_sum_native.h>
#include <ATen/ops/_sparse_sum_ops.h>
#include <ATen/ops/_sparse_sum_native.h>
#include <ATen/ops/_sparse_sum_ops.h>
#include <ATen/ops/_sparse_sum_native.h>
#include <ATen/ops/_sparse_sum_ops.h>
#include <ATen/ops/_sparse_softmax_native.h>
#include <ATen/ops/_sparse_softmax_ops.h>
#include <ATen/ops/_sparse_softmax_native.h>
#include <ATen/ops/_sparse_softmax_ops.h>
#include <ATen/ops/_sparse_log_softmax_native.h>
#include <ATen/ops/_sparse_log_softmax_ops.h>
#include <ATen/ops/_sparse_log_softmax_native.h>
#include <ATen/ops/_sparse_log_softmax_ops.h>
#include <ATen/ops/subtract_native.h>
#include <ATen/ops/subtract_ops.h>
#include <ATen/ops/subtract_native.h>
#include <ATen/ops/subtract_ops.h>
#include <ATen/ops/_sparse_mm_reduce_impl_native.h>
#include <ATen/ops/_sparse_mm_reduce_impl_ops.h>
#include <ATen/ops/_sparse_mm_reduce_impl_backward_native.h>
#include <ATen/ops/_sparse_mm_reduce_impl_backward_ops.h>
#include <ATen/ops/_scaled_grouped_mm_native.h>
#include <ATen/ops/_scaled_grouped_mm_ops.h>
#include <ATen/ops/_sparse_compressed_tensor_with_dims_native.h>
#include <ATen/ops/_sparse_compressed_tensor_with_dims_ops.h>
#include <ATen/ops/sparse_compressed_tensor_native.h>
#include <ATen/ops/sparse_compressed_tensor_ops.h>
#include <ATen/ops/sparse_csr_tensor_native.h>
#include <ATen/ops/sparse_csr_tensor_ops.h>
#include <ATen/ops/sparse_csc_tensor_native.h>
#include <ATen/ops/sparse_csc_tensor_ops.h>
#include <ATen/ops/sparse_bsr_tensor_native.h>
#include <ATen/ops/sparse_bsr_tensor_ops.h>
#include <ATen/ops/sparse_bsc_tensor_native.h>
#include <ATen/ops/sparse_bsc_tensor_ops.h>
#include <ATen/ops/sparse_compressed_tensor_native.h>
#include <ATen/ops/sparse_compressed_tensor_ops.h>
#include <ATen/ops/sparse_csr_tensor_native.h>
#include <ATen/ops/sparse_csr_tensor_ops.h>
#include <ATen/ops/sparse_csc_tensor_native.h>
#include <ATen/ops/sparse_csc_tensor_ops.h>
#include <ATen/ops/sparse_bsr_tensor_native.h>
#include <ATen/ops/sparse_bsr_tensor_ops.h>
#include <ATen/ops/sparse_bsc_tensor_native.h>
#include <ATen/ops/sparse_bsc_tensor_ops.h>
#include <ATen/ops/_sparse_compressed_tensor_unsafe_native.h>
#include <ATen/ops/_sparse_compressed_tensor_unsafe_ops.h>
#include <ATen/ops/_sparse_csr_tensor_unsafe_native.h>
#include <ATen/ops/_sparse_csr_tensor_unsafe_ops.h>
#include <ATen/ops/_sparse_csc_tensor_unsafe_native.h>
#include <ATen/ops/_sparse_csc_tensor_unsafe_ops.h>
#include <ATen/ops/_sparse_bsr_tensor_unsafe_native.h>
#include <ATen/ops/_sparse_bsr_tensor_unsafe_ops.h>
#include <ATen/ops/_sparse_bsc_tensor_unsafe_native.h>
#include <ATen/ops/_sparse_bsc_tensor_unsafe_ops.h>
#include <ATen/ops/sparse_coo_tensor_native.h>
#include <ATen/ops/sparse_coo_tensor_ops.h>
#include <ATen/ops/sparse_coo_tensor_native.h>
#include <ATen/ops/sparse_coo_tensor_ops.h>
#include <ATen/ops/_sparse_coo_tensor_unsafe_native.h>
#include <ATen/ops/_sparse_coo_tensor_unsafe_ops.h>
#include <ATen/ops/_validate_sparse_coo_tensor_args_native.h>
#include <ATen/ops/_validate_sparse_coo_tensor_args_ops.h>
#include <ATen/ops/_validate_sparse_compressed_tensor_args_native.h>
#include <ATen/ops/_validate_sparse_compressed_tensor_args_ops.h>
#include <ATen/ops/_validate_sparse_csr_tensor_args_native.h>
#include <ATen/ops/_validate_sparse_csr_tensor_args_ops.h>
#include <ATen/ops/_validate_sparse_csc_tensor_args_native.h>
#include <ATen/ops/_validate_sparse_csc_tensor_args_ops.h>
#include <ATen/ops/_validate_sparse_bsr_tensor_args_native.h>
#include <ATen/ops/_validate_sparse_bsr_tensor_args_ops.h>
#include <ATen/ops/_validate_sparse_bsc_tensor_args_native.h>
#include <ATen/ops/_validate_sparse_bsc_tensor_args_ops.h>
#include <ATen/ops/_to_cpu_native.h>
#include <ATen/ops/_to_cpu_ops.h>
#include <ATen/ops/to_dense_native.h>
#include <ATen/ops/to_dense_ops.h>
#include <ATen/ops/to_dense_backward_native.h>
#include <ATen/ops/to_dense_backward_ops.h>
#include <ATen/ops/sparse_dim_native.h>
#include <ATen/ops/sparse_dim_ops.h>
#include <ATen/ops/_dimI_native.h>
#include <ATen/ops/_dimI_ops.h>
#include <ATen/ops/dense_dim_native.h>
#include <ATen/ops/dense_dim_ops.h>
#include <ATen/ops/_dimV_native.h>
#include <ATen/ops/_dimV_ops.h>
#include <ATen/ops/_nnz_native.h>
#include <ATen/ops/_nnz_ops.h>
#include <ATen/ops/is_coalesced_native.h>
#include <ATen/ops/is_coalesced_ops.h>
#include <ATen/ops/to_sparse_native.h>
#include <ATen/ops/to_sparse_ops.h>
#include <ATen/ops/to_sparse_native.h>
#include <ATen/ops/to_sparse_ops.h>
#include <ATen/ops/to_sparse_csr_native.h>
#include <ATen/ops/to_sparse_csr_ops.h>
#include <ATen/ops/to_sparse_csc_native.h>
#include <ATen/ops/to_sparse_csc_ops.h>
#include <ATen/ops/to_sparse_bsr_native.h>
#include <ATen/ops/to_sparse_bsr_ops.h>
#include <ATen/ops/to_sparse_bsc_native.h>
#include <ATen/ops/to_sparse_bsc_ops.h>
#include <ATen/ops/_to_sparse_semi_structured_native.h>
#include <ATen/ops/_to_sparse_semi_structured_ops.h>
#include <ATen/ops/to_mkldnn_backward_native.h>
#include <ATen/ops/to_mkldnn_backward_ops.h>
#include <ATen/ops/q_scale_native.h>
#include <ATen/ops/q_scale_ops.h>
#include <ATen/ops/q_zero_point_native.h>
#include <ATen/ops/q_zero_point_ops.h>
#include <ATen/ops/q_per_channel_axis_native.h>
#include <ATen/ops/q_per_channel_axis_ops.h>
#include <ATen/ops/qscheme_native.h>
#include <ATen/ops/qscheme_ops.h>
#include <ATen/ops/fake_quantize_per_tensor_affine_native.h>
#include <ATen/ops/fake_quantize_per_tensor_affine_ops.h>
#include <ATen/ops/fake_quantize_per_tensor_affine_native.h>
#include <ATen/ops/fake_quantize_per_tensor_affine_ops.h>
#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_backward_native.h>
#include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_backward_ops.h>
#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_backward_native.h>
#include <ATen/ops/_fake_quantize_learnable_per_tensor_affine_backward_ops.h>
#include <ATen/ops/fake_quantize_per_channel_affine_native.h>
#include <ATen/ops/fake_quantize_per_channel_affine_ops.h>
#include <ATen/ops/fake_quantize_per_channel_affine_cachemask_backward_native.h>
#include <ATen/ops/fake_quantize_per_channel_affine_cachemask_backward_ops.h>
#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_backward_native.h>
#include <ATen/ops/_fake_quantize_learnable_per_channel_affine_backward_ops.h>
#include <ATen/ops/fused_moving_avg_obs_fake_quant_native.h>
#include <ATen/ops/fused_moving_avg_obs_fake_quant_ops.h>
#include <ATen/ops/_choose_qparams_per_tensor_native.h>
#include <ATen/ops/_choose_qparams_per_tensor_ops.h>
#include <ATen/ops/_saturate_weight_to_fp16_native.h>
#include <ATen/ops/_saturate_weight_to_fp16_ops.h>
#include <ATen/ops/choose_qparams_optimized_native.h>
#include <ATen/ops/choose_qparams_optimized_ops.h>
#include <ATen/ops/meshgrid_native.h>
#include <ATen/ops/meshgrid_ops.h>
#include <ATen/ops/meshgrid_native.h>
#include <ATen/ops/meshgrid_ops.h>
#include <ATen/ops/cartesian_prod_native.h>
#include <ATen/ops/cartesian_prod_ops.h>
#include <ATen/ops/combinations_native.h>
#include <ATen/ops/combinations_ops.h>
#include <ATen/ops/item_native.h>
#include <ATen/ops/item_ops.h>
#include <ATen/ops/result_type_native.h>
#include <ATen/ops/result_type_ops.h>
#include <ATen/ops/result_type_native.h>
#include <ATen/ops/result_type_ops.h>
#include <ATen/ops/result_type_native.h>
#include <ATen/ops/result_type_ops.h>
#include <ATen/ops/result_type_native.h>
#include <ATen/ops/result_type_ops.h>
#include <ATen/ops/can_cast_native.h>
#include <ATen/ops/can_cast_ops.h>
#include <ATen/ops/promote_types_native.h>
#include <ATen/ops/promote_types_ops.h>
#include <ATen/ops/_local_scalar_dense_native.h>
#include <ATen/ops/_local_scalar_dense_ops.h>
#include <ATen/ops/_thnn_fused_lstm_cell_backward_native.h>
#include <ATen/ops/_thnn_fused_lstm_cell_backward_ops.h>
#include <ATen/ops/_thnn_differentiable_lstm_cell_backward_native.h>
#include <ATen/ops/_thnn_differentiable_lstm_cell_backward_ops.h>
#include <ATen/ops/_thnn_differentiable_gru_cell_backward_native.h>
#include <ATen/ops/_thnn_differentiable_gru_cell_backward_ops.h>
#include <ATen/ops/lstm_native.h>
#include <ATen/ops/lstm_ops.h>
#include <ATen/ops/lstm_native.h>
#include <ATen/ops/lstm_ops.h>
#include <ATen/ops/gru_native.h>
#include <ATen/ops/gru_ops.h>
#include <ATen/ops/gru_native.h>
#include <ATen/ops/gru_ops.h>
#include <ATen/ops/rnn_tanh_native.h>
#include <ATen/ops/rnn_tanh_ops.h>
#include <ATen/ops/rnn_tanh_native.h>
#include <ATen/ops/rnn_tanh_ops.h>
#include <ATen/ops/rnn_relu_native.h>
#include <ATen/ops/rnn_relu_ops.h>
#include <ATen/ops/rnn_relu_native.h>
#include <ATen/ops/rnn_relu_ops.h>
#include <ATen/ops/lstm_cell_native.h>
#include <ATen/ops/lstm_cell_ops.h>
#include <ATen/ops/gru_cell_native.h>
#include <ATen/ops/gru_cell_ops.h>
#include <ATen/ops/rnn_tanh_cell_native.h>
#include <ATen/ops/rnn_tanh_cell_ops.h>
#include <ATen/ops/rnn_relu_cell_native.h>
#include <ATen/ops/rnn_relu_cell_ops.h>
#include <ATen/ops/quantized_lstm_cell_native.h>
#include <ATen/ops/quantized_lstm_cell_ops.h>
#include <ATen/ops/quantized_gru_cell_native.h>
#include <ATen/ops/quantized_gru_cell_ops.h>
#include <ATen/ops/quantized_rnn_relu_cell_native.h>
#include <ATen/ops/quantized_rnn_relu_cell_ops.h>
#include <ATen/ops/quantized_rnn_tanh_cell_native.h>
#include <ATen/ops/quantized_rnn_tanh_cell_ops.h>
#include <ATen/ops/_pack_padded_sequence_backward_native.h>
#include <ATen/ops/_pack_padded_sequence_backward_ops.h>
#include <ATen/ops/_pad_packed_sequence_native.h>
#include <ATen/ops/_pad_packed_sequence_ops.h>
#include <ATen/ops/set_native.h>
#include <ATen/ops/set_ops.h>
#include <ATen/ops/is_set_to_native.h>
#include <ATen/ops/is_set_to_ops.h>
#include <ATen/ops/masked_scatter_backward_native.h>
#include <ATen/ops/masked_scatter_backward_ops.h>
#include <ATen/ops/index_add_native.h>
#include <ATen/ops/index_add_ops.h>
#include <ATen/ops/index_fill_native.h>
#include <ATen/ops/index_fill_ops.h>
#include <ATen/ops/index_fill_native.h>
#include <ATen/ops/index_fill_ops.h>
#include <ATen/ops/index_fill_native.h>
#include <ATen/ops/index_fill_ops.h>
#include <ATen/ops/index_fill_native.h>
#include <ATen/ops/index_fill_ops.h>
#include <ATen/ops/scatter_native.h>
#include <ATen/ops/scatter_ops.h>
#include <ATen/ops/scatter_native.h>
#include <ATen/ops/scatter_ops.h>
#include <ATen/ops/scatter_add_native.h>
#include <ATen/ops/scatter_add_ops.h>
#include <ATen/ops/and_native.h>
#include <ATen/ops/and_ops.h>
#include <ATen/ops/and_native.h>
#include <ATen/ops/and_ops.h>
#include <ATen/ops/and_native.h>
#include <ATen/ops/and_ops.h>
#include <ATen/ops/and_native.h>
#include <ATen/ops/and_ops.h>
#include <ATen/ops/or_native.h>
#include <ATen/ops/or_ops.h>
#include <ATen/ops/or_native.h>
#include <ATen/ops/or_ops.h>
#include <ATen/ops/or_native.h>
#include <ATen/ops/or_ops.h>
#include <ATen/ops/or_native.h>
#include <ATen/ops/or_ops.h>
#include <ATen/ops/xor_native.h>
#include <ATen/ops/xor_ops.h>
#include <ATen/ops/xor_native.h>
#include <ATen/ops/xor_ops.h>
#include <ATen/ops/xor_native.h>
#include <ATen/ops/xor_ops.h>
#include <ATen/ops/xor_native.h>
#include <ATen/ops/xor_ops.h>
#include <ATen/ops/trace_backward_native.h>
#include <ATen/ops/trace_backward_ops.h>
#include <ATen/ops/index_select_backward_native.h>
#include <ATen/ops/index_select_backward_ops.h>
#include <ATen/ops/masked_select_backward_native.h>
#include <ATen/ops/masked_select_backward_ops.h>
#include <ATen/ops/nonzero_numpy_native.h>
#include <ATen/ops/nonzero_numpy_ops.h>
#include <ATen/ops/argwhere_native.h>
#include <ATen/ops/argwhere_ops.h>
#include <ATen/ops/gather_backward_native.h>
#include <ATen/ops/gather_backward_ops.h>
#include <ATen/ops/_gather_sparse_backward_native.h>
#include <ATen/ops/_gather_sparse_backward_ops.h>
#include <ATen/ops/cross_entropy_loss_native.h>
#include <ATen/ops/cross_entropy_loss_ops.h>
#include <ATen/ops/_linalg_check_errors_native.h>
#include <ATen/ops/_linalg_check_errors_ops.h>
#include <ATen/ops/linalg_vander_native.h>
#include <ATen/ops/linalg_vander_ops.h>
#include <ATen/ops/_lu_with_info_native.h>
#include <ATen/ops/_lu_with_info_ops.h>
#include <ATen/ops/polygamma_native.h>
#include <ATen/ops/polygamma_ops.h>
#include <ATen/ops/histogramdd_native.h>
#include <ATen/ops/histogramdd_ops.h>
#include <ATen/ops/histogramdd_native.h>
#include <ATen/ops/histogramdd_ops.h>
#include <ATen/ops/histogramdd_native.h>
#include <ATen/ops/histogramdd_ops.h>
#include <ATen/ops/argsort_native.h>
#include <ATen/ops/argsort_ops.h>
#include <ATen/ops/argsort_native.h>
#include <ATen/ops/argsort_ops.h>
#include <ATen/ops/equal_native.h>
#include <ATen/ops/equal_ops.h>
#include <ATen/ops/_foreach_pow_native.h>
#include <ATen/ops/_foreach_pow_ops.h>
#include <ATen/ops/l1_loss_native.h>
#include <ATen/ops/l1_loss_ops.h>
#include <ATen/ops/nll_loss_nd_native.h>
#include <ATen/ops/nll_loss_nd_ops.h>
#include <ATen/ops/adaptive_avg_pool3d_backward_native.h>
#include <ATen/ops/adaptive_avg_pool3d_backward_ops.h>
#include <ATen/ops/_pad_circular_native.h>
#include <ATen/ops/_pad_circular_ops.h>
#include <ATen/ops/_pad_enum_native.h>
#include <ATen/ops/_pad_enum_ops.h>
#include <ATen/ops/pad_native.h>
#include <ATen/ops/pad_ops.h>
#include <ATen/ops/upsample_linear1d_native.h>
#include <ATen/ops/upsample_linear1d_ops.h>
#include <ATen/ops/_upsample_bilinear2d_aa_native.h>
#include <ATen/ops/_upsample_bilinear2d_aa_ops.h>
#include <ATen/ops/upsample_trilinear3d_native.h>
#include <ATen/ops/upsample_trilinear3d_ops.h>
#include <ATen/ops/upsample_bicubic2d_native.h>
#include <ATen/ops/upsample_bicubic2d_ops.h>
#include <ATen/ops/_upsample_bicubic2d_aa_native.h>
#include <ATen/ops/_upsample_bicubic2d_aa_ops.h>
#include <ATen/ops/upsample_nearest1d_native.h>
#include <ATen/ops/upsample_nearest1d_ops.h>
#include <ATen/ops/_upsample_nearest_exact1d_native.h>
#include <ATen/ops/_upsample_nearest_exact1d_ops.h>
#include <ATen/ops/_upsample_nearest_exact2d_native.h>
#include <ATen/ops/_upsample_nearest_exact2d_ops.h>
#include <ATen/ops/upsample_nearest3d_native.h>
#include <ATen/ops/upsample_nearest3d_ops.h>
#include <ATen/ops/_upsample_nearest_exact3d_native.h>
#include <ATen/ops/_upsample_nearest_exact3d_ops.h>
#include <ATen/ops/_slow_conv2d_backward_native.h>
#include <ATen/ops/_slow_conv2d_backward_ops.h>
#include <ATen/ops/isfinite_native.h>
#include <ATen/ops/isfinite_ops.h>
#include <ATen/ops/record_stream_native.h>
#include <ATen/ops/record_stream_ops.h>
#include <ATen/ops/_add_batch_dim_native.h>
#include <ATen/ops/_add_batch_dim_ops.h>
#include <ATen/ops/_remove_batch_dim_native.h>
#include <ATen/ops/_remove_batch_dim_ops.h>
#include <ATen/ops/special_log_softmax_native.h>
#include <ATen/ops/special_log_softmax_ops.h>
#include <ATen/ops/special_softmax_native.h>
#include <ATen/ops/special_softmax_ops.h>
#include <ATen/ops/fft_fftshift_native.h>
#include <ATen/ops/fft_fftshift_ops.h>
#include <ATen/ops/fft_ifftshift_native.h>
#include <ATen/ops/fft_ifftshift_ops.h>
#include <ATen/ops/det_native.h>
#include <ATen/ops/det_ops.h>
#include <ATen/ops/logdet_native.h>
#include <ATen/ops/logdet_ops.h>
#include <ATen/ops/_linalg_eigvals_native.h>
#include <ATen/ops/_linalg_eigvals_ops.h>
#include <ATen/ops/_spsolve_native.h>
#include <ATen/ops/_spsolve_ops.h>
#include <ATen/ops/nested_to_padded_tensor_native.h>
#include <ATen/ops/nested_to_padded_tensor_ops.h>
#include <ATen/ops/_test_serialization_subcmul_native.h>
#include <ATen/ops/_test_serialization_subcmul_ops.h>
#include <ATen/ops/_test_parallel_materialize_native.h>
#include <ATen/ops/_test_parallel_materialize_ops.h>
#include <ATen/ops/_test_string_default_native.h>
#include <ATen/ops/_test_string_default_ops.h>
#include <ATen/ops/_test_ambiguous_defaults_native.h>
#include <ATen/ops/_test_ambiguous_defaults_ops.h>
#include <ATen/ops/_test_ambiguous_defaults_native.h>
#include <ATen/ops/_test_ambiguous_defaults_ops.h>
#include <ATen/ops/_test_autograd_multiple_dispatch_native.h>
#include <ATen/ops/_test_autograd_multiple_dispatch_ops.h>
#include <ATen/ops/pad_sequence_native.h>
#include <ATen/ops/pad_sequence_ops.h>
#include <ATen/ops/flatten_dense_tensors_native.h>
#include <ATen/ops/flatten_dense_tensors_ops.h>
#include <ATen/ops/unflatten_dense_tensors_native.h>
#include <ATen/ops/unflatten_dense_tensors_ops.h>
#include <ATen/ops/_jagged_to_padded_dense_forward_native.h>
#include <ATen/ops/_jagged_to_padded_dense_forward_ops.h>
#include <ATen/ops/_padded_dense_to_jagged_forward_native.h>
#include <ATen/ops/_padded_dense_to_jagged_forward_ops.h>
#include <ATen/ops/_nested_from_padded_tensor_native.h>
#include <ATen/ops/_nested_from_padded_tensor_ops.h>
#include <ATen/ops/_nested_tensor_softmax_with_shape_native.h>
#include <ATen/ops/_nested_tensor_softmax_with_shape_ops.h>
#include <ATen/ops/_safe_softmax_native.h>
#include <ATen/ops/_safe_softmax_ops.h>
#include <ATen/ops/scaled_dot_product_attention_native.h>
#include <ATen/ops/scaled_dot_product_attention_ops.h>
#include <ATen/ops/_fused_sdp_choice_native.h>
#include <ATen/ops/_fused_sdp_choice_ops.h>
#include <ATen/ops/_scaled_dot_product_attention_math_native.h>
#include <ATen/ops/_scaled_dot_product_attention_math_ops.h>
#include <ATen/ops/_scaled_dot_product_attention_math_for_mps_native.h>
#include <ATen/ops/_scaled_dot_product_attention_math_for_mps_ops.h>
#include <ATen/ops/_scaled_dot_product_flash_attention_native.h>
#include <ATen/ops/_scaled_dot_product_flash_attention_ops.h>
#include <ATen/ops/_scaled_dot_product_flash_attention_for_cpu_native.h>
#include <ATen/ops/_scaled_dot_product_flash_attention_for_cpu_ops.h>
#include <ATen/ops/_scaled_dot_product_fused_attention_overrideable_native.h>
#include <ATen/ops/_scaled_dot_product_fused_attention_overrideable_ops.h>
#include <ATen/ops/_scaled_dot_product_flash_attention_backward_native.h>
#include <ATen/ops/_scaled_dot_product_flash_attention_backward_ops.h>
#include <ATen/ops/_scaled_dot_product_flash_attention_for_cpu_backward_native.h>
#include <ATen/ops/_scaled_dot_product_flash_attention_for_cpu_backward_ops.h>
#include <ATen/ops/_scaled_dot_product_fused_attention_overrideable_backward_native.h>
#include <ATen/ops/_scaled_dot_product_fused_attention_overrideable_backward_ops.h>
#include <ATen/ops/_scaled_dot_product_efficient_attention_native.h>
#include <ATen/ops/_scaled_dot_product_efficient_attention_ops.h>
#include <ATen/ops/_scaled_dot_product_efficient_attention_backward_native.h>
#include <ATen/ops/_scaled_dot_product_efficient_attention_backward_ops.h>
#include <ATen/ops/_scaled_dot_product_cudnn_attention_native.h>
#include <ATen/ops/_scaled_dot_product_cudnn_attention_ops.h>
#include <ATen/ops/_scaled_dot_product_cudnn_attention_backward_native.h>
#include <ATen/ops/_scaled_dot_product_cudnn_attention_backward_ops.h>
#include <ATen/ops/_flash_attention_forward_native.h>
#include <ATen/ops/_flash_attention_forward_ops.h>
#include <ATen/ops/_flash_attention_backward_native.h>
#include <ATen/ops/_flash_attention_backward_ops.h>
#include <ATen/ops/_efficient_attention_forward_native.h>
#include <ATen/ops/_efficient_attention_forward_ops.h>
#include <ATen/ops/_efficient_attention_backward_native.h>
#include <ATen/ops/_efficient_attention_backward_ops.h>
#include <ATen/ops/_cudnn_attention_forward_native.h>
#include <ATen/ops/_cudnn_attention_forward_ops.h>
#include <ATen/ops/_fill_mem_eff_dropout_mask_native.h>
#include <ATen/ops/_fill_mem_eff_dropout_mask_ops.h>
#include <ATen/ops/_propagate_xla_data_native.h>
#include <ATen/ops/_propagate_xla_data_ops.h>
#endif

namespace at {
namespace functionalization {

// This keyset is used by functionalization when it calls into meta kernels
// to accurately propagate stride metadata.
// Exclude any modes: calling into meta kernels is only an implementation detail
// used to perform shape inference, and we don't want any mode keys to run.
// Specifically, we want to prevent functionalization and Python modes from running.
constexpr auto exclude_keys_for_meta_dispatch =
    c10::functorch_transforms_ks |
    c10::DispatchKeySet({
        c10::DispatchKey::FuncTorchDynamicLayerBackMode,
        c10::DispatchKey::FuncTorchDynamicLayerFrontMode,
        c10::DispatchKey::Python,
        c10::DispatchKey::PreDispatch,
    });
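// Usage sketch (illustrative only; the generated wrappers below do exactly this --
// `self_meta`/`out_meta` stand for meta-device copies produced by to_meta() further down):
//   at::AutoDispatchSkipFunctionalize func_guard;
//   c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
//   at::_ops::abs_out::call(self_meta, out_meta);  // runs only the meta kernel, for shape/stride inference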

// Helper around at::has_internal_overlap.
// The ATen util is used in hot-path eager mode: it's always fast,
// but might return TOO_HARD sometimes.
// During functionalization, we're ok taking a bit longer
// to detect memory overlap.
inline bool has_internal_overlap_helper(const at::Tensor& t) {
  auto has_overlap = at::has_internal_overlap(t);
  if (has_overlap == at::MemOverlap::Yes) return true;
  if (has_overlap == at::MemOverlap::No) return false;
  // at::MemOverlap::TooHard: overlap could not be determined cheaply; report no overlap.
  return false;
}
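// Example (sketch): an expanded view such as at::zeros({1, 3}).expand({4, 3}) has a zero
// stride over a dimension of size > 1, so the helper above returns true for it.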


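// to_meta: build meta-device shadows of the inputs so that the original mutable op can be
// re-run on the Meta backend purely for shape checking in the generated wrappers below.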
inline Tensor to_meta(const Tensor& t) {
    if (!t.defined()) return t;
    return at::native::empty_strided_meta_symint(t.sym_sizes(), t.sym_strides(),
/*dtype=*/t.scalar_type(), /*layout=*/t.layout(),
/*device=*/c10::Device(kMeta), /*pin_memory=*/std::nullopt);
}

inline std::optional<Tensor> to_meta(const std::optional<Tensor>& t) {
  if (t.has_value()) {
    return to_meta(*t);
  }
  return std::nullopt;
}

inline std::vector<Tensor> to_meta(at::ITensorListRef t_list) {
  std::vector<Tensor> outputs;
  outputs.reserve(t_list.size());
  for (const auto& tensor : t_list) {
    outputs.push_back(to_meta(tensor));
  }
  return outputs;
}

inline c10::List<Tensor> to_meta(const c10::List<Tensor>& t_list) {
  c10::List<Tensor> outputs;
  outputs.reserve(t_list.size());
  for (const auto i : c10::irange(t_list.size())) {
    outputs.push_back(to_meta(t_list[i]));
  }
  return outputs;
}

inline c10::List<::std::optional<Tensor>> to_meta(const c10::List<::std::optional<Tensor>>& t_list) {
  c10::List<::std::optional<Tensor>> outputs;
  outputs.reserve(t_list.size());
  for (const auto i : c10::irange(t_list.size())) {
    outputs.push_back(to_meta(t_list[i]));
  }
  return outputs;
}

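// Escape hatch: setting TORCH_DISABLE_FUNCTIONALIZATION_META_REFERENCE=1 in the environment
// skips the meta-tensor reference pass that the generated wrappers below may otherwise run.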
static bool disable_meta_reference() {
  static auto env = std::getenv("TORCH_DISABLE_FUNCTIONALIZATION_META_REFERENCE");
  return env != nullptr && std::strcmp(env, "1") == 0;
}



    at::Tensor & _new_zeros_with_same_feature_meta_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_new_zeros_with_same_feature_meta_out::call(self_meta, other_meta, self_num_batch_dims, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_new_zeros_with_same_feature_meta_out::call(self_, other_, self_num_batch_dims, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_new_zeros_with_same_feature_meta::call(self_, other_, self_num_batch_dims);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
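
    // Note: every generated out= wrapper in this file follows the same pattern as above:
    // (1) optionally re-run the op on meta tensors to surface shape errors early,
    // (2) unwrap any functional tensor inputs,
    // (3) error out if a functional input would mutate a non-functional output; otherwise either
    //     redispatch the original out= op unchanged, or call the functional variant and commit
    //     the result back into the functional output wrapper.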

    ::std::tuple<at::Tensor &,at::Tensor &> _cudnn_ctc_loss_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity, at::Tensor & out0, at::Tensor & out1) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto log_probs_meta = to_meta(log_probs);
        auto targets_meta = to_meta(targets);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_cudnn_ctc_loss_out::call(log_probs_meta, targets_meta, input_lengths, target_lengths, blank, deterministic, zero_infinity, out0_meta, out1_meta);
      }
      
      at::Tensor log_probs_;
      if (at::functionalization::impl::isFunctionalTensor(log_probs)) {
        at::functionalization::impl::sync(log_probs);
        log_probs_ = at::functionalization::impl::from_functional_tensor(log_probs);
      } else {
        log_probs_ = log_probs;
      }
      
      at::Tensor targets_;
      if (at::functionalization::impl::isFunctionalTensor(targets)) {
        at::functionalization::impl::sync(targets);
        targets_ = at::functionalization::impl::from_functional_tensor(targets);
      } else {
        targets_ = targets;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || log_probs.device().type() == c10::DeviceType::XLA || targets.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(log_probs) || at::functionalization::impl::isFunctionalTensor(targets))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::_cudnn_ctc_loss_out::call(log_probs_, targets_, input_lengths, target_lengths, blank, deterministic, zero_infinity, out0_, out1_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_cudnn_ctc_loss::call(log_probs_, targets_, input_lengths, target_lengths, blank, deterministic, zero_infinity);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
      }
    }

    at::Tensor & _cudnn_rnn_flatten_weight_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto weight_arr_meta = to_meta(weight_arr);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_cudnn_rnn_flatten_weight_out::call(weight_arr_meta, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out_meta);
      }
      
      ::std::vector<at::Tensor> weight_arr_;
      if (at::functionalization::impl::isFunctionalTensor(weight_arr)) {
        at::functionalization::impl::sync(weight_arr);
        weight_arr_ = at::functionalization::impl::from_functional_tensor(weight_arr);
      } else {
        weight_arr_ = weight_arr.vec();
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(weight_arr))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_cudnn_rnn_flatten_weight_out::call(weight_arr_, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_cudnn_rnn_flatten_weight::call(weight_arr_, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _cudnn_rnn_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const ::std::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const ::std::optional<at::Tensor> & cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const ::std::optional<at::Tensor> & dropout_state, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto weight_meta = to_meta(weight);
        auto weight_buf_meta = to_meta(weight_buf);
        auto hx_meta = to_meta(hx);
        auto cx_meta = to_meta(cx);
        auto dropout_state_meta = to_meta(dropout_state);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        auto out2_meta = to_meta(out2);
        auto out3_meta = to_meta(out3);
        auto out4_meta = to_meta(out4);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_cudnn_rnn_out::call(input_meta, weight_meta, weight_stride0, weight_buf_meta, hx_meta, cx_meta, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_meta, out0_meta, out1_meta, out2_meta, out3_meta, out4_meta);
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      ::std::vector<at::Tensor> weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight.vec();
      }
      
      ::std::optional<at::Tensor> weight_buf_;
      if (at::functionalization::impl::isFunctionalTensor(weight_buf)) {
        at::functionalization::impl::sync(weight_buf);
        weight_buf_ = at::functionalization::impl::from_functional_tensor(weight_buf);
      } else {
        weight_buf_ = weight_buf;
      }
      
      at::Tensor hx_;
      if (at::functionalization::impl::isFunctionalTensor(hx)) {
        at::functionalization::impl::sync(hx);
        hx_ = at::functionalization::impl::from_functional_tensor(hx);
      } else {
        hx_ = hx;
      }
      
      ::std::optional<at::Tensor> cx_;
      if (at::functionalization::impl::isFunctionalTensor(cx)) {
        at::functionalization::impl::sync(cx);
        cx_ = at::functionalization::impl::from_functional_tensor(cx);
      } else {
        cx_ = cx;
      }
      
      ::std::optional<at::Tensor> dropout_state_;
      if (at::functionalization::impl::isFunctionalTensor(dropout_state)) {
        at::functionalization::impl::sync(dropout_state);
        dropout_state_ = at::functionalization::impl::from_functional_tensor(dropout_state);
      } else {
        dropout_state_ = dropout_state;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      
      at::Tensor out2_;
      if (at::functionalization::impl::isFunctionalTensor(out2)) {
        at::functionalization::impl::sync(out2);
        out2_ = at::functionalization::impl::from_functional_tensor(out2);
      } else {
        out2_ = out2;
      }
      
      at::Tensor out3_;
      if (at::functionalization::impl::isFunctionalTensor(out3)) {
        at::functionalization::impl::sync(out3);
        out3_ = at::functionalization::impl::from_functional_tensor(out3);
      } else {
        out3_ = out3;
      }
      
      at::Tensor out4_;
      if (at::functionalization::impl::isFunctionalTensor(out4)) {
        at::functionalization::impl::sync(out4);
        out4_ = at::functionalization::impl::from_functional_tensor(out4);
      } else {
        out4_ = out4;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2) && at::functionalization::impl::isFunctionalTensor(out3) && at::functionalization::impl::isFunctionalTensor(out4))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA || hx.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(weight_buf) || at::functionalization::impl::isFunctionalTensor(hx) || at::functionalization::impl::isFunctionalTensor(cx) || at::functionalization::impl::isFunctionalTensor(dropout_state))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::_cudnn_rnn_out::call(input_, weight_, weight_stride0, weight_buf_, hx_, cx_, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_, out0_, out1_, out2_, out3_, out4_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3, out4);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_cudnn_rnn::call(input_, weight_, weight_stride0, weight_buf_, hx_, cx_, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        auto out2_inner = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(out2);
        at::functionalization::impl::sync(out2);
        auto out2_inner_updated = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::propagate_xla_data_direct(out2_inner, out2_inner_updated);
        auto out3_inner = at::functionalization::impl::from_functional_tensor(out3);
        at::functionalization::impl::replace_(out3, std::get<3>(tmp_output));
        at::functionalization::impl::commit_update(out3);
        at::functionalization::impl::sync(out3);
        auto out3_inner_updated = at::functionalization::impl::from_functional_tensor(out3);
        at::functionalization::impl::propagate_xla_data_direct(out3_inner, out3_inner_updated);
        auto out4_inner = at::functionalization::impl::from_functional_tensor(out4);
        at::functionalization::impl::replace_(out4, std::get<4>(tmp_output));
        at::functionalization::impl::commit_update(out4);
        at::functionalization::impl::sync(out4);
        auto out4_inner_updated = at::functionalization::impl::from_functional_tensor(out4);
        at::functionalization::impl::propagate_xla_data_direct(out4_inner, out4_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3, out4);
      }
    }
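
    // _cudnn_rnn_backward additionally takes a TensorList out argument (out3). List arguments are
    // unwrapped into a ::std::vector<at::Tensor>, and since the out= overload returns void, the
    // functionalized branch simply commits the updates instead of returning the out references.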

    void _cudnn_rnn_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const ::std::optional<at::Tensor> & cx, const at::Tensor & output, const ::std::optional<at::Tensor> & grad_output, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const ::std::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto weight_meta = to_meta(weight);
        auto weight_buf_meta = to_meta(weight_buf);
        auto hx_meta = to_meta(hx);
        auto cx_meta = to_meta(cx);
        auto output_meta = to_meta(output);
        auto grad_output_meta = to_meta(grad_output);
        auto grad_hy_meta = to_meta(grad_hy);
        auto grad_cy_meta = to_meta(grad_cy);
        auto dropout_state_meta = to_meta(dropout_state);
        auto reserve_meta = to_meta(reserve);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        auto out2_meta = to_meta(out2);
        auto out3_meta = to_meta(out3);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_cudnn_rnn_backward_out::call(input_meta, weight_meta, weight_stride0, weight_buf_meta, hx_meta, cx_meta, output_meta, grad_output_meta, grad_hy_meta, grad_cy_meta, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_meta, reserve_meta, output_mask, out0_meta, out1_meta, out2_meta, out3_meta);
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      ::std::vector<at::Tensor> weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight.vec();
      }
      
      at::Tensor weight_buf_;
      if (at::functionalization::impl::isFunctionalTensor(weight_buf)) {
        at::functionalization::impl::sync(weight_buf);
        weight_buf_ = at::functionalization::impl::from_functional_tensor(weight_buf);
      } else {
        weight_buf_ = weight_buf;
      }
      
      at::Tensor hx_;
      if (at::functionalization::impl::isFunctionalTensor(hx)) {
        at::functionalization::impl::sync(hx);
        hx_ = at::functionalization::impl::from_functional_tensor(hx);
      } else {
        hx_ = hx;
      }
      
      ::std::optional<at::Tensor> cx_;
      if (at::functionalization::impl::isFunctionalTensor(cx)) {
        at::functionalization::impl::sync(cx);
        cx_ = at::functionalization::impl::from_functional_tensor(cx);
      } else {
        cx_ = cx;
      }
      
      at::Tensor output_;
      if (at::functionalization::impl::isFunctionalTensor(output)) {
        at::functionalization::impl::sync(output);
        output_ = at::functionalization::impl::from_functional_tensor(output);
      } else {
        output_ = output;
      }
      
      ::std::optional<at::Tensor> grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      ::std::optional<at::Tensor> grad_hy_;
      if (at::functionalization::impl::isFunctionalTensor(grad_hy)) {
        at::functionalization::impl::sync(grad_hy);
        grad_hy_ = at::functionalization::impl::from_functional_tensor(grad_hy);
      } else {
        grad_hy_ = grad_hy;
      }
      
      ::std::optional<at::Tensor> grad_cy_;
      if (at::functionalization::impl::isFunctionalTensor(grad_cy)) {
        at::functionalization::impl::sync(grad_cy);
        grad_cy_ = at::functionalization::impl::from_functional_tensor(grad_cy);
      } else {
        grad_cy_ = grad_cy;
      }
      
      ::std::optional<at::Tensor> dropout_state_;
      if (at::functionalization::impl::isFunctionalTensor(dropout_state)) {
        at::functionalization::impl::sync(dropout_state);
        dropout_state_ = at::functionalization::impl::from_functional_tensor(dropout_state);
      } else {
        dropout_state_ = dropout_state;
      }
      
      at::Tensor reserve_;
      if (at::functionalization::impl::isFunctionalTensor(reserve)) {
        at::functionalization::impl::sync(reserve);
        reserve_ = at::functionalization::impl::from_functional_tensor(reserve);
      } else {
        reserve_ = reserve;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      
      at::Tensor out2_;
      if (at::functionalization::impl::isFunctionalTensor(out2)) {
        at::functionalization::impl::sync(out2);
        out2_ = at::functionalization::impl::from_functional_tensor(out2);
      } else {
        out2_ = out2;
      }
      
      ::std::vector<at::Tensor> out3_;
      if (at::functionalization::impl::isFunctionalTensor(out3)) {
        at::functionalization::impl::sync(out3);
        out3_ = at::functionalization::impl::from_functional_tensor(out3);
      } else {
        out3_ = out3.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2) && at::functionalization::impl::isFunctionalTensor(out3))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA || weight_buf.device().type() == c10::DeviceType::XLA || hx.device().type() == c10::DeviceType::XLA || output.device().type() == c10::DeviceType::XLA || reserve.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(weight_buf) || at::functionalization::impl::isFunctionalTensor(hx) || at::functionalization::impl::isFunctionalTensor(cx) || at::functionalization::impl::isFunctionalTensor(output) || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(grad_hy) || at::functionalization::impl::isFunctionalTensor(grad_cy) || at::functionalization::impl::isFunctionalTensor(dropout_state) || at::functionalization::impl::isFunctionalTensor(reserve))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_cudnn_rnn_backward_out::call(input_, weight_, weight_stride0, weight_buf_, hx_, cx_, output_, grad_output_, grad_hy_, grad_cy_, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_, reserve_, output_mask, out0_, out1_, out2_, out3_);
         
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_cudnn_rnn_backward::call(input_, weight_, weight_stride0, weight_buf_, hx_, cx_, output_, grad_output_, grad_hy_, grad_cy_, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_, reserve_, output_mask);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        auto out2_inner = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(out2);
        at::functionalization::impl::sync(out2);
        auto out2_inner_updated = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::propagate_xla_data_direct(out2_inner, out2_inner_updated);
        auto out3_inner = at::functionalization::impl::from_functional_tensor(out3);
        at::functionalization::impl::replace_(out3, std::get<3>(tmp_output));
        at::functionalization::impl::commit_update(out3);
        at::functionalization::impl::sync(out3);
        auto out3_inner_updated = at::functionalization::impl::from_functional_tensor(out3);
        at::functionalization::impl::propagate_xla_data_direct(out3_inner, out3_inner_updated);
      }
    }
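
    // _cudnn_init_dropout_state is a factory op: its functional variant takes TensorOptions rather
    // than tensor inputs, so the functionalized branch below rebuilds dtype/layout/device from the
    // unwrapped out tensor (out_) when redispatching.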

    at::Tensor & _cudnn_init_dropout_state_out_out(c10::DispatchKeySet dispatchKeySet, double dropout, bool train, int64_t dropout_seed, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_cudnn_init_dropout_state_out::call(dropout, train, dropout_seed, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_cudnn_init_dropout_state_out::call(dropout, train, dropout_seed, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_cudnn_init_dropout_state::call(dropout, train, dropout_seed, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> _fused_dropout_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, ::std::optional<at::Generator> generator, at::Tensor & out0, at::Tensor & out1) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_fused_dropout_out::call(self_meta, p, generator, out0_meta, out1_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::_fused_dropout_out::call(self_, p, generator, out0_, out1_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_fused_dropout::call(self_, p, generator);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
      }
    }

    at::Tensor & _masked_scale_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, double scale, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto mask_meta = to_meta(mask);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_masked_scale_out::call(self_meta, mask_meta, scale, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor mask_;
      if (at::functionalization::impl::isFunctionalTensor(mask)) {
        at::functionalization::impl::sync(mask);
        mask_ = at::functionalization::impl::from_functional_tensor(mask);
      } else {
        mask_ = mask;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || mask.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(mask))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_masked_scale_out::call(self_, mask_, scale, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_masked_scale::call(self_, mask_, scale);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> native_dropout_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, double p, ::std::optional<bool> train, at::Tensor & out0, at::Tensor & out1) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::native_dropout_out::call(input_meta, p, train, out0_meta, out1_meta);
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::native_dropout_out::call(input_, p, train, out0_, out1_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::native_dropout::call(input_, p, train);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
      }
    }

    at::Tensor & native_dropout_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & mask, double scale, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto mask_meta = to_meta(mask);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::native_dropout_backward_out::call(grad_output_meta, mask_meta, scale, out_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor mask_;
      if (at::functionalization::impl::isFunctionalTensor(mask)) {
        at::functionalization::impl::sync(mask);
        mask_ = at::functionalization::impl::from_functional_tensor(mask);
      } else {
        mask_ = mask;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || mask.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(mask))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::native_dropout_backward_out::call(grad_output_, mask_, scale, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::native_dropout_backward::call(grad_output_, mask_, scale);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & abs_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::abs_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::abs_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::abs::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
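
    // A minimal usage sketch (not emitted by torchgen) of how an in-place kernel like abs_ below is
    // typically reached: the caller wraps a tensor in a FunctionalTensorWrapper, enables the
    // Functionalize dispatch key, and calls the op; the kernel then redispatches to the functional
    // at::abs() and commits the result back into the wrapper. The wrapping and guard calls here are
    // assumptions based on the headers this file includes, not part of the generated registrations:
    //
    //   at::Tensor a = at::randn({4});
    //   at::Tensor f = at::functionalization::impl::to_functional_tensor(a);  // assumed helper
    //   {
    //     c10::impl::IncludeDispatchKeyGuard guard(c10::DispatchKey::Functionalize);
    //     f.abs_();  // lands in abs_() below, which calls at::_ops::abs::call(...)
    //   }
    //   at::functionalization::impl::sync(f);
    //   at::Tensor result = at::functionalization::impl::from_functional_tensor(f);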

    at::Tensor & abs_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::abs_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::abs_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::abs::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & absolute_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::absolute_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::absolute_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::absolute::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & absolute_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::absolute_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::absolute_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::absolute::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & angle_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::angle_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::angle_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::angle::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & sgn_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::sgn_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::sgn_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::sgn::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & sgn_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::sgn_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::sgn_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::sgn::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & _conj_physical_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_conj_physical_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_conj_physical_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_conj_physical::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & conj_physical_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::conj_physical_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::conj_physical_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::conj_physical::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & conj_physical_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::conj_physical_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::conj_physical_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::conj_physical::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
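
    // Illustrative sketch (not emitted by torchgen): the meta-tensor pre-check that
    // the in-place kernels above run (the `true && !disable_meta_reference()` branch).
    // Replaying the original mutable op on meta tensors surfaces shape/dtype errors
    // up front, even though only the functional variant ultimately executes.
    // The helper name and the choice of conj_physical_ are assumptions for illustration.
    [[maybe_unused]] static void meta_precheck_example(const at::Tensor & self) {
      auto self_meta = to_meta(self);                                       // sizes/strides only, no data
      at::AutoDispatchSkipFunctionalize func_guard;                         // don't re-enter functionalization
      c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
      at::_ops::conj_physical_::call(self_meta);                            // runs shape checks on meta tensors
    }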

    at::Tensor & acos_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::acos_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::acos_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::acos::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & acos_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::acos_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::acos_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::acos::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & arccos_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::arccos_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::arccos_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::arccos::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & arccos_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::arccos_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::arccos_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::arccos::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & avg_pool1d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::avg_pool1d_out::call(self_meta, kernel_size, stride, padding, ceil_mode, count_include_pad, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::avg_pool1d_out::call(self_, kernel_size, stride, padding, ceil_mode, count_include_pad, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::avg_pool1d::call(self_, kernel_size, stride, padding, ceil_mode, count_include_pad);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & adaptive_avg_pool1d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::adaptive_avg_pool1d_out::call(self_meta, output_size, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::adaptive_avg_pool1d_out::call(self_, output_size, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::adaptive_avg_pool1d::call(self_, output_size);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & add_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::add_out::call(self_meta, other_meta, alpha, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::add_out::call(self_, other_, alpha, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::add_Tensor::call(self_, other_, alpha);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
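
    // Illustrative sketch (not emitted by torchgen): the net effect of the out=
    // kernels in this file when `out` is a functional tensor. Instead of writing
    // into out's storage, the purely functional variant computes a fresh tensor
    // which is then committed into out's wrapper. The helper name is hypothetical;
    // `self`, `other`, and `out` are assumed to already be functional tensors.
    [[maybe_unused]] static void functionalize_out_example(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      at::Tensor tmp;
      {
        // Compute the functional result without re-entering functionalization.
        at::AutoDispatchSkipFunctionalize guard;
        tmp = at::_ops::add_Tensor::call(
            at::functionalization::impl::from_functional_tensor(self),
            at::functionalization::impl::from_functional_tensor(other),
            /*alpha=*/1);
      }
      // Stage the new value on out's wrapper and commit it, as add_out_out() above does.
      at::functionalization::impl::replace_(out, tmp);
      at::functionalization::impl::commit_update(out);
      at::functionalization::impl::sync(out);
    }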

    at::Tensor & add__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::add__Tensor::call(self_meta, other_meta, alpha);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::add__Tensor::call(self_, other_, alpha);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::add_Tensor::call(self_, other_, alpha);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & _add_relu_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_add_relu_out::call(self_meta, other_meta, alpha, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_add_relu_out::call(self_, other_, alpha, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_add_relu_Tensor::call(self_, other_, alpha);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _add_relu__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_add_relu__Tensor::call(self_meta, other_meta, alpha);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_add_relu__Tensor::call(self_, other_, alpha);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_add_relu_Tensor::call(self_, other_, alpha);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & _add_relu_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_add_relu_Scalar_out::call(self_meta, other, alpha, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_add_relu_Scalar_out::call(self_, other, alpha, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_add_relu_Scalar::call(self_, other, alpha);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _add_relu__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_add_relu__Scalar::call(self_meta, other, alpha);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_add_relu__Scalar::call(self_, other, alpha);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_add_relu_Scalar::call(self_, other, alpha);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & add_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::add_Scalar_out::call(self_meta, other, alpha, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::add_Scalar_out::call(self_, other, alpha, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::add_Scalar::call(self_, other, alpha);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & add__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::add__Scalar::call(self_meta, other, alpha);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::add__Scalar::call(self_, other, alpha);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::add_Scalar::call(self_, other, alpha);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
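
    // Note on the case 1 / case 2 branch repeated in every kernel here: when the
    // mutated tensor is not functional, functional (non-XLA) inputs are rejected
    // (case 1); otherwise nothing is functional, so the kernel simply redispatches
    // to the original mutable op (case 2). A minimal sketch of the case 2 fast path,
    // assuming no functional tensors are involved (helper name is hypothetical):
    [[maybe_unused]] static at::Tensor & case2_redispatch_example(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
      // Skip functionalization entirely and run the original in-place op,
      // exactly as add__Scalar() above does in its case 2 branch.
      at::AutoDispatchSkipFunctionalize guard;
      at::_ops::add__Scalar::call(self, other, alpha);
      return self;
    }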

    at::Tensor & addmv_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto mat_meta = to_meta(mat);
        auto vec_meta = to_meta(vec);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::addmv_out::call(self_meta, mat_meta, vec_meta, beta, alpha, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor mat_;
      if (at::functionalization::impl::isFunctionalTensor(mat)) {
        at::functionalization::impl::sync(mat);
        mat_ = at::functionalization::impl::from_functional_tensor(mat);
      } else {
        mat_ = mat;
      }
      
      at::Tensor vec_;
      if (at::functionalization::impl::isFunctionalTensor(vec)) {
        at::functionalization::impl::sync(vec);
        vec_ = at::functionalization::impl::from_functional_tensor(vec);
      } else {
        vec_ = vec;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || mat.device().type() == c10::DeviceType::XLA || vec.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(mat) || at::functionalization::impl::isFunctionalTensor(vec))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::addmv_out::call(self_, mat_, vec_, beta, alpha, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::addmv::call(self_, mat_, vec_, beta, alpha);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & addmv_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto mat_meta = to_meta(mat);
        auto vec_meta = to_meta(vec);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::addmv_::call(self_meta, mat_meta, vec_meta, beta, alpha);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor mat_;
      if (at::functionalization::impl::isFunctionalTensor(mat)) {
        at::functionalization::impl::sync(mat);
        mat_ = at::functionalization::impl::from_functional_tensor(mat);
      } else {
        mat_ = mat;
      }
      
      at::Tensor vec_;
      if (at::functionalization::impl::isFunctionalTensor(vec)) {
        at::functionalization::impl::sync(vec);
        vec_ = at::functionalization::impl::from_functional_tensor(vec);
      } else {
        vec_ = vec;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || mat.device().type() == c10::DeviceType::XLA || vec.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(mat) || at::functionalization::impl::isFunctionalTensor(vec))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::addmv_::call(self_, mat_, vec_, beta, alpha);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::addmv::call(self_, mat_, vec_, beta, alpha);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & addr_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto vec1_meta = to_meta(vec1);
        auto vec2_meta = to_meta(vec2);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::addr_out::call(self_meta, vec1_meta, vec2_meta, beta, alpha, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor vec1_;
      if (at::functionalization::impl::isFunctionalTensor(vec1)) {
        at::functionalization::impl::sync(vec1);
        vec1_ = at::functionalization::impl::from_functional_tensor(vec1);
      } else {
        vec1_ = vec1;
      }
      
      at::Tensor vec2_;
      if (at::functionalization::impl::isFunctionalTensor(vec2)) {
        at::functionalization::impl::sync(vec2);
        vec2_ = at::functionalization::impl::from_functional_tensor(vec2);
      } else {
        vec2_ = vec2;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || vec1.device().type() == c10::DeviceType::XLA || vec2.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(vec1) || at::functionalization::impl::isFunctionalTensor(vec2))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::addr_out::call(self_, vec1_, vec2_, beta, alpha, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::addr::call(self_, vec1_, vec2_, beta, alpha);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & addr_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto vec1_meta = to_meta(vec1);
        auto vec2_meta = to_meta(vec2);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::addr_::call(self_meta, vec1_meta, vec2_meta, beta, alpha);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor vec1_;
      if (at::functionalization::impl::isFunctionalTensor(vec1)) {
        at::functionalization::impl::sync(vec1);
        vec1_ = at::functionalization::impl::from_functional_tensor(vec1);
      } else {
        vec1_ = vec1;
      }
      
      at::Tensor vec2_;
      if (at::functionalization::impl::isFunctionalTensor(vec2)) {
        at::functionalization::impl::sync(vec2);
        vec2_ = at::functionalization::impl::from_functional_tensor(vec2);
      } else {
        vec2_ = vec2;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || vec1.device().type() == c10::DeviceType::XLA || vec2.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(vec1) || at::functionalization::impl::isFunctionalTensor(vec2))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::addr_::call(self_, vec1_, vec2_, beta, alpha);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::addr::call(self_, vec1_, vec2_, beta, alpha);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

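    // Note: out= overloads such as affine_grid_generator_out below have the meta
    // reference replay compiled out (their guard reads `if (false && ...)`), while the
    // in-place overloads like addr_ above use `if (true && ...)`; as the generated
    // comment states, the meta-tensor shape check is currently only enabled for
    // in-place ops.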
    at::Tensor & affine_grid_generator_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & theta, c10::SymIntArrayRef size, bool align_corners, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto theta_meta = to_meta(theta);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::affine_grid_generator_out::call(theta_meta, size, align_corners, out_meta);
      }
      
      at::Tensor theta_;
      if (at::functionalization::impl::isFunctionalTensor(theta)) {
        at::functionalization::impl::sync(theta);
        theta_ = at::functionalization::impl::from_functional_tensor(theta);
      } else {
        theta_ = theta;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || theta.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(theta))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::affine_grid_generator_out::call(theta_, size, align_corners, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::affine_grid_generator::call(theta_, size, align_corners);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _test_functorch_fallback_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_test_functorch_fallback_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_test_functorch_fallback_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_test_functorch_fallback::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & all_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::all_out::call(self_meta, dim, keepdim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::all_out::call(self_, dim, keepdim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::all_dim::call(self_, dim, keepdim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & all_out_dims_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::all_dims_out::call(self_meta, dim, keepdim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::all_dims_out::call(self_, dim, keepdim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::all_dims::call(self_, dim, keepdim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & all_out_dimname_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::all_dimname_out::call(self_meta, dim, keepdim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::all_dimname_out::call(self_, dim, keepdim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::all_dimname::call(self_, dim, keepdim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & any_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::any_out::call(self_meta, dim, keepdim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::any_out::call(self_, dim, keepdim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::any_dim::call(self_, dim, keepdim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & any_out_dims_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::any_dims_out::call(self_meta, dim, keepdim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::any_dims_out::call(self_, dim, keepdim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::any_dims::call(self_, dim, keepdim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & any_out_dimname_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::any_dimname_out::call(self_meta, dim, keepdim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::any_dimname_out::call(self_, dim, keepdim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::any_dimname::call(self_, dim, keepdim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

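    // The arange kernels below are factory-style: `out` is the only tensor argument,
    // so the cross-argument functional/XLA checks reduce to `if (!(false) && (false))`,
    // and the functional path forwards out's scalar type, layout and device to the
    // corresponding at::_ops::arange overload.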
    at::Tensor & arange_out_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & end, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::arange_out::call(end, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::arange_out::call(end, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::arange::call(end, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & arange_out_start_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::arange_start_out::call(start, end, step, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::arange_start_out::call(start, end, step, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::arange_start_step::call(start, end, step, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & argmax_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::argmax_out::call(self_meta, dim, keepdim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::argmax_out::call(self_, dim, keepdim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::argmax::call(self_, dim, keepdim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & argmin_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::argmin_out::call(self_meta, dim, keepdim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::argmin_out::call(self_, dim, keepdim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::argmin::call(self_, dim, keepdim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & acosh_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::acosh_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::acosh_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::acosh::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

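    // Illustrative sketch only (kept in a comment, not part of the generated
    // registrations): one way the acosh_out_out kernel above can be reached is by
    // wrapping the tensors in functional wrappers, calling the out= op, then syncing
    // and unwrapping. The shapes and values here are made up for the example.
    //
    //   at::Tensor self = at::functionalization::impl::to_functional_tensor(at::rand({4}) + 1.0);
    //   at::Tensor out  = at::functionalization::impl::to_functional_tensor(at::empty({4}));
    //   at::acosh_outf(self, out);                 // dispatches to acosh_out_out above
    //   at::functionalization::impl::sync(out);
    //   at::Tensor result = at::functionalization::impl::from_functional_tensor(out);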
    at::Tensor & acosh_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::acosh_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::acosh_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::acosh::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & arccosh_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::arccosh_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::arccosh_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::arccosh::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & arccosh_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::arccosh_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::arccosh_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::arccosh::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & asinh_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::asinh_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::asinh_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::asinh::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & asinh_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::asinh_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::asinh_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::asinh::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & arcsinh_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::arcsinh_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::arcsinh_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::arcsinh::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & arcsinh_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::arcsinh_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::arcsinh_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::arcsinh::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
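
    // A minimal sketch of what the meta-tensor pre-check above (e.g. in arcsinh_) catches:
    // running the original in-place op on meta tensors reproduces dtype/shape errors that
    // only apply to the in-place form, without allocating or touching real data. The shape
    // and dtype below are hypothetical:
    //
    //   at::Tensor t_meta = at::empty({3}, at::TensorOptions().dtype(at::kLong).device(at::kMeta));
    //   at::arcsinh(t_meta);   // fine: the functional variant returns a new float tensor
    //   t_meta.arcsinh_();     // errors: the in-place form cannot cast the float result back to long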

    at::Tensor & atanh_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::atanh_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::atanh_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::atanh::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & atanh_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::atanh_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::atanh_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::atanh::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & arctanh_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::arctanh_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::arctanh_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::arctanh::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & arctanh_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::arctanh_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::arctanh_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::arctanh::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & asin_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::asin_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::asin_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::asin::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & asin_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::asin_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::asin_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::asin::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & arcsin_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::arcsin_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::arcsin_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::arcsin::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & arcsin_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::arcsin_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::arcsin_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::arcsin::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & atan_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::atan_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::atan_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::atan::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & atan_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::atan_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::atan_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::atan::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & arctan_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::arctan_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::arctan_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::arctan::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & arctan_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::arctan_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::arctan_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::arctan::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & baddbmm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto batch1_meta = to_meta(batch1);
        auto batch2_meta = to_meta(batch2);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::baddbmm_out::call(self_meta, batch1_meta, batch2_meta, beta, alpha, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor batch1_;
      if (at::functionalization::impl::isFunctionalTensor(batch1)) {
        at::functionalization::impl::sync(batch1);
        batch1_ = at::functionalization::impl::from_functional_tensor(batch1);
      } else {
        batch1_ = batch1;
      }
      
      at::Tensor batch2_;
      if (at::functionalization::impl::isFunctionalTensor(batch2)) {
        at::functionalization::impl::sync(batch2);
        batch2_ = at::functionalization::impl::from_functional_tensor(batch2);
      } else {
        batch2_ = batch2;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || batch1.device().type() == c10::DeviceType::XLA || batch2.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(batch1) || at::functionalization::impl::isFunctionalTensor(batch2))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::baddbmm_out::call(self_, batch1_, batch2_, beta, alpha, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::baddbmm::call(self_, batch1_, batch2_, beta, alpha);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & baddbmm_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto batch1_meta = to_meta(batch1);
        auto batch2_meta = to_meta(batch2);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::baddbmm_::call(self_meta, batch1_meta, batch2_meta, beta, alpha);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor batch1_;
      if (at::functionalization::impl::isFunctionalTensor(batch1)) {
        at::functionalization::impl::sync(batch1);
        batch1_ = at::functionalization::impl::from_functional_tensor(batch1);
      } else {
        batch1_ = batch1;
      }
      
      at::Tensor batch2_;
      if (at::functionalization::impl::isFunctionalTensor(batch2)) {
        at::functionalization::impl::sync(batch2);
        batch2_ = at::functionalization::impl::from_functional_tensor(batch2);
      } else {
        batch2_ = batch2;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || batch1.device().type() == c10::DeviceType::XLA || batch2.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(batch1) || at::functionalization::impl::isFunctionalTensor(batch2))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::baddbmm_::call(self_, batch1_, batch2_, beta, alpha);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::baddbmm::call(self_, batch1_, batch2_, beta, alpha);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
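
    // The baddbmm_ kernel above follows the commit sequence used for every in-place op in
    // this file: unwrap the FunctionalTensorWrapper inputs, run the functional variant under
    // AutoDispatchSkipFunctionalize, then write the result back via replace_()/commit_update().
    // A minimal sketch of exercising it from C++, assuming every input is wrapped (an
    // unwrapped input would trip the case-1 assert above); shapes are hypothetical:
    //
    //   at::Tensor self = at::functionalization::impl::to_functional_tensor(at::randn({2, 3, 3}));
    //   at::Tensor b1   = at::functionalization::impl::to_functional_tensor(at::randn({2, 3, 4}));
    //   at::Tensor b2   = at::functionalization::impl::to_functional_tensor(at::randn({2, 4, 3}));
    //   self.baddbmm_(b1, b2);  // dispatches here; recorded as a functional baddbmm + replace_, no real mutation
    //   at::Tensor result = at::functionalization::impl::from_functional_tensor(self);  // updated value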

    at::Tensor & bartlett_window_out_out(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::bartlett_window_out::call(window_length, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::bartlett_window_out::call(window_length, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::bartlett_window::call(window_length, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & bartlett_window_out_periodic_out(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::bartlett_window_periodic_out::call(window_length, periodic, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::bartlett_window_periodic_out::call(window_length, periodic, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::bartlett_window_periodic::call(window_length, periodic, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
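
    // The two bartlett_window kernels above are the factory-op form of this pattern: with no
    // tensor input to take options from, the functional call is rebuilt from `out_` itself
    // (scalar_type/layout/device, pin_memory left as nullopt). A minimal sketch of the
    // equivalent direct call, assuming a hypothetical preallocated float `out`:
    //
    //   at::Tensor out = at::empty({0}, at::TensorOptions().dtype(at::kFloat));
    //   at::Tensor w = at::_ops::bartlett_window::call(
    //       /*window_length=*/8, out.scalar_type(), out.layout(), out.device(), ::std::nullopt);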

    at::Tensor & quantized_batch_norm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & var, double eps, double output_scale, int64_t output_zero_point, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto weight_meta = to_meta(weight);
        auto bias_meta = to_meta(bias);
        auto mean_meta = to_meta(mean);
        auto var_meta = to_meta(var);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::quantized_batch_norm_out::call(input_meta, weight_meta, bias_meta, mean_meta, var_meta, eps, output_scale, output_zero_point, out_meta);
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      ::std::optional<at::Tensor> weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      ::std::optional<at::Tensor> bias_;
      if (at::functionalization::impl::isFunctionalTensor(bias)) {
        at::functionalization::impl::sync(bias);
        bias_ = at::functionalization::impl::from_functional_tensor(bias);
      } else {
        bias_ = bias;
      }
      
      at::Tensor mean_;
      if (at::functionalization::impl::isFunctionalTensor(mean)) {
        at::functionalization::impl::sync(mean);
        mean_ = at::functionalization::impl::from_functional_tensor(mean);
      } else {
        mean_ = mean;
      }
      
      at::Tensor var_;
      if (at::functionalization::impl::isFunctionalTensor(var)) {
        at::functionalization::impl::sync(var);
        var_ = at::functionalization::impl::from_functional_tensor(var);
      } else {
        var_ = var;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA || mean.device().type() == c10::DeviceType::XLA || var.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias) || at::functionalization::impl::isFunctionalTensor(mean) || at::functionalization::impl::isFunctionalTensor(var))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::quantized_batch_norm_out::call(input_, weight_, bias_, mean_, var_, eps, output_scale, output_zero_point, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::quantized_batch_norm::call(input_, weight_, bias_, mean_, var_, eps, output_scale, output_zero_point);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & bernoulli_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::Generator> generator, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::bernoulli_out::call(self_meta, generator, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::bernoulli_out::call(self_, generator, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::bernoulli::call(self_, generator);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & bernoulli_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & p, ::std::optional<at::Generator> generator, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto p_meta = to_meta(p);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::bernoulli_Tensor_out::call(self_meta, p_meta, generator, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor p_;
      if (at::functionalization::impl::isFunctionalTensor(p)) {
        at::functionalization::impl::sync(p);
        p_ = at::functionalization::impl::from_functional_tensor(p);
      } else {
        p_ = p;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || p.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(p))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::bernoulli_Tensor_out::call(self_, p_, generator, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::bernoulli_Tensor::call(self_, p_, generator);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
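
    // The literal true/false in the `if (<flag> && !disable_meta_reference())` guards is a
    // per-operator constant baked in by the code generator: in this file, in-place overloads
    // (e.g. bernoulli_.Tensor below) carry `true` and replay the call on meta tensors first to
    // surface shape errors early, while out= overloads carry `false` and skip that reference run.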

    at::Tensor & bernoulli__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & p, ::std::optional<at::Generator> generator) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto p_meta = to_meta(p);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::bernoulli__Tensor::call(self_meta, p_meta, generator);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor p_;
      if (at::functionalization::impl::isFunctionalTensor(p)) {
        at::functionalization::impl::sync(p);
        p_ = at::functionalization::impl::from_functional_tensor(p);
      } else {
        p_ = p;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || p.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(p))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::bernoulli__Tensor::call(self_, p_, generator);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::bernoulli_Tensor::call(self_, p_, generator);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & bernoulli_out_float_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, ::std::optional<at::Generator> generator, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::bernoulli_float_out::call(self_meta, p, generator, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::bernoulli_float_out::call(self_, p, generator, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::bernoulli_p::call(self_, p, generator);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & bernoulli__float(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double p, ::std::optional<at::Generator> generator) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::bernoulli__float::call(self_meta, p, generator);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
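        // (For this overload the generated check folds to `if (!(false) && (false))`: bernoulli_.float
        // has no tensor arguments besides self, so case 1 can never fire and the non-functional path
        // always falls through to the plain redispatch in case 2.)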
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::bernoulli__float::call(self_, p, generator);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::bernoulli_p::call(self_, p, generator);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & binary_cross_entropy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto target_meta = to_meta(target);
        auto weight_meta = to_meta(weight);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::binary_cross_entropy_out::call(self_meta, target_meta, weight_meta, reduction, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor target_;
      if (at::functionalization::impl::isFunctionalTensor(target)) {
        at::functionalization::impl::sync(target);
        target_ = at::functionalization::impl::from_functional_tensor(target);
      } else {
        target_ = target;
      }
      
      ::std::optional<at::Tensor> weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || target.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target) || at::functionalization::impl::isFunctionalTensor(weight))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::binary_cross_entropy_out::call(self_, target_, weight_, reduction, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::binary_cross_entropy::call(self_, target_, weight_, reduction);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & binary_cross_entropy_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto target_meta = to_meta(target);
        auto weight_meta = to_meta(weight);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::binary_cross_entropy_backward_grad_input::call(grad_output_meta, self_meta, target_meta, weight_meta, reduction, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor target_;
      if (at::functionalization::impl::isFunctionalTensor(target)) {
        at::functionalization::impl::sync(target);
        target_ = at::functionalization::impl::from_functional_tensor(target);
      } else {
        target_ = target;
      }
      
      ::std::optional<at::Tensor> weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA || target.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target) || at::functionalization::impl::isFunctionalTensor(weight))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::binary_cross_entropy_backward_grad_input::call(grad_output_, self_, target_, weight_, reduction, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::binary_cross_entropy_backward::call(grad_output_, self_, target_, weight_, reduction);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }

    at::Tensor & binary_cross_entropy_with_logits_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & pos_weight, int64_t reduction, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto target_meta = to_meta(target);
        auto weight_meta = to_meta(weight);
        auto pos_weight_meta = to_meta(pos_weight);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::binary_cross_entropy_with_logits_out::call(self_meta, target_meta, weight_meta, pos_weight_meta, reduction, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor target_;
      if (at::functionalization::impl::isFunctionalTensor(target)) {
        at::functionalization::impl::sync(target);
        target_ = at::functionalization::impl::from_functional_tensor(target);
      } else {
        target_ = target;
      }
      
      ::std::optional<at::Tensor> weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      ::std::optional<at::Tensor> pos_weight_;
      if (at::functionalization::impl::isFunctionalTensor(pos_weight)) {
        at::functionalization::impl::sync(pos_weight);
        pos_weight_ = at::functionalization::impl::from_functional_tensor(pos_weight);
      } else {
        pos_weight_ = pos_weight;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || target.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(pos_weight))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::binary_cross_entropy_with_logits_out::call(self_, target_, weight_, pos_weight_, reduction, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::binary_cross_entropy_with_logits::call(self_, target_, weight_, pos_weight_, reduction);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & bincount_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Tensor> & weights, int64_t minlength, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto weights_meta = to_meta(weights);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::bincount_out::call(self_meta, weights_meta, minlength, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      ::std::optional<at::Tensor> weights_;
      if (at::functionalization::impl::isFunctionalTensor(weights)) {
        at::functionalization::impl::sync(weights);
        weights_ = at::functionalization::impl::from_functional_tensor(weights);
      } else {
        weights_ = weights;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weights))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::bincount_out::call(self_, weights_, minlength, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::bincount::call(self_, weights_, minlength);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & bitwise_not_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::bitwise_not_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::bitwise_not_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::bitwise_not::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & bitwise_not_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::bitwise_not_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::bitwise_not_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::bitwise_not::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & copysign_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::copysign_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::copysign_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::copysign_Tensor::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & copysign__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::copysign__Tensor::call(self_meta, other_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::copysign__Tensor::call(self_, other_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::copysign_Tensor::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & copysign_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::copysign_Scalar_out::call(self_meta, other, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::copysign_Scalar_out::call(self_, other, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::copysign_Scalar::call(self_, other);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & copysign__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::copysign__Scalar::call(self_meta, other);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::copysign__Scalar::call(self_, other);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::copysign_Scalar::call(self_, other);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & logical_not_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::logical_not_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::logical_not_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::logical_not::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & logical_not_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::logical_not_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::logical_not_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::logical_not::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & logical_xor_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::logical_xor_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::logical_xor_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::logical_xor::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & logical_xor_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::logical_xor_::call(self_meta, other_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::logical_xor_::call(self_, other_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::logical_xor::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & logical_and_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::logical_and_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::logical_and_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::logical_and::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
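
    // In-place (foo_) wrappers differ from the out= wrappers above only in that the mutated
    // argument is `self` and that the meta-tensor replay is compiled in (`true &&` rather
    // than `false &&`), since in-place ops are the ones known to support meta tensors.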

    at::Tensor & logical_and_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::logical_and_::call(self_meta, other_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::logical_and_::call(self_, other_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::logical_and::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & logical_or_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::logical_or_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::logical_or_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::logical_or::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & logical_or_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::logical_or_::call(self_meta, other_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::logical_or_::call(self_, other_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::logical_or::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
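
    // Factory-style out= ops such as blackman_window take no tensor inputs; their functional
    // variant is called with the dtype/layout/device of `out_`, with the trailing
    // ::std::nullopt standing in for the optional pin_memory argument.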

    at::Tensor & blackman_window_out_out(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::blackman_window_out::call(window_length, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::blackman_window_out::call(window_length, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::blackman_window::call(window_length, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & blackman_window_out_periodic_out(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::blackman_window_periodic_out::call(window_length, periodic, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::blackman_window_periodic_out::call(window_length, periodic, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::blackman_window_periodic::call(window_length, periodic, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & bmm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto mat2_meta = to_meta(mat2);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::bmm_out::call(self_meta, mat2_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor mat2_;
      if (at::functionalization::impl::isFunctionalTensor(mat2)) {
        at::functionalization::impl::sync(mat2);
        mat2_ = at::functionalization::impl::from_functional_tensor(mat2);
      } else {
        mat2_ = mat2;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || mat2.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(mat2))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::bmm_out::call(self_, mat2_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::bmm::call(self_, mat2_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
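
    // List-valued inputs (cat, concat, concatenate, block_diag, chain_matmul) are unwrapped
    // into a ::std::vector<at::Tensor>: the whole list goes through from_functional_tensor()
    // when it holds functional tensors, and is otherwise copied into a plain vector.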

    at::Tensor & cat_out_out(c10::DispatchKeySet dispatchKeySet, const at::ITensorListRef & tensors, int64_t dim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto tensors_meta = to_meta(tensors);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::cat_out::call(tensors_meta, dim, out_meta);
      }
      
      ::std::vector<at::Tensor> tensors_;
      if (at::functionalization::impl::isFunctionalTensor(tensors)) {
        at::functionalization::impl::sync(tensors);
        tensors_ = at::functionalization::impl::from_functional_tensor(tensors);
      } else {
        tensors_ = {tensors.begin(), tensors.end()};
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(tensors))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::cat_out::call(tensors_, dim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::cat::call(tensors_, dim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & cat_out_names_out(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Dimname dim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto tensors_meta = to_meta(tensors);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::cat_names_out::call(tensors_meta, dim, out_meta);
      }
      
      ::std::vector<at::Tensor> tensors_;
      if (at::functionalization::impl::isFunctionalTensor(tensors)) {
        at::functionalization::impl::sync(tensors);
        tensors_ = at::functionalization::impl::from_functional_tensor(tensors);
      } else {
        tensors_ = tensors.vec();
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(tensors))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::cat_names_out::call(tensors_, dim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::cat_names::call(tensors_, dim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & concat_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto tensors_meta = to_meta(tensors);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::concat_out::call(tensors_meta, dim, out_meta);
      }
      
      ::std::vector<at::Tensor> tensors_;
      if (at::functionalization::impl::isFunctionalTensor(tensors)) {
        at::functionalization::impl::sync(tensors);
        tensors_ = at::functionalization::impl::from_functional_tensor(tensors);
      } else {
        tensors_ = tensors.vec();
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(tensors))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::concat_out::call(tensors_, dim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::concat::call(tensors_, dim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & concat_out_names_out(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Dimname dim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto tensors_meta = to_meta(tensors);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::concat_names_out::call(tensors_meta, dim, out_meta);
      }
      
      ::std::vector<at::Tensor> tensors_;
      if (at::functionalization::impl::isFunctionalTensor(tensors)) {
        at::functionalization::impl::sync(tensors);
        tensors_ = at::functionalization::impl::from_functional_tensor(tensors);
      } else {
        tensors_ = tensors.vec();
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(tensors))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::concat_names_out::call(tensors_, dim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::concat_names::call(tensors_, dim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & concatenate_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto tensors_meta = to_meta(tensors);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::concatenate_out::call(tensors_meta, dim, out_meta);
      }
      
      ::std::vector<at::Tensor> tensors_;
      if (at::functionalization::impl::isFunctionalTensor(tensors)) {
        at::functionalization::impl::sync(tensors);
        tensors_ = at::functionalization::impl::from_functional_tensor(tensors);
      } else {
        tensors_ = tensors.vec();
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(tensors))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::concatenate_out::call(tensors_, dim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::concatenate::call(tensors_, dim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & concatenate_out_names_out(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Dimname dim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto tensors_meta = to_meta(tensors);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::concatenate_names_out::call(tensors_meta, dim, out_meta);
      }
      
      ::std::vector<at::Tensor> tensors_;
      if (at::functionalization::impl::isFunctionalTensor(tensors)) {
        at::functionalization::impl::sync(tensors);
        tensors_ = at::functionalization::impl::from_functional_tensor(tensors);
      } else {
        tensors_ = tensors.vec();
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(tensors))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::concatenate_names_out::call(tensors_, dim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::concatenate_names::call(tensors_, dim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & block_diag_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto tensors_meta = to_meta(tensors);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::block_diag_out::call(tensors_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> tensors_;
      if (at::functionalization::impl::isFunctionalTensor(tensors)) {
        at::functionalization::impl::sync(tensors);
        tensors_ = at::functionalization::impl::from_functional_tensor(tensors);
      } else {
        tensors_ = tensors.vec();
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(tensors))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::block_diag_out::call(tensors_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::block_diag::call(tensors_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & ceil_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::ceil_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::ceil_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::ceil::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & ceil_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::ceil_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::ceil_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::ceil::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & chain_matmul_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList matrices, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto matrices_meta = to_meta(matrices);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::chain_matmul_out::call(matrices_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> matrices_;
      if (at::functionalization::impl::isFunctionalTensor(matrices)) {
        at::functionalization::impl::sync(matrices);
        matrices_ = at::functionalization::impl::from_functional_tensor(matrices);
      } else {
        matrices_ = matrices.vec();
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(matrices))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::chain_matmul_out::call(matrices_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::chain_matmul::call(matrices_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & clamp_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::clamp_out::call(self_meta, min, max, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::clamp_out::call(self_, min, max, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::clamp::call(self_, min, max);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & clamp_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::clamp_::call(self_meta, min, max);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::clamp_::call(self_, min, max);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::clamp::call(self_, min, max);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
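
    // Optional tensor arguments (e.g. clamp.Tensor's min/max) are unwrapped into
    // ::std::optional<at::Tensor> using the same sync() / from_functional_tensor() steps
    // as required tensor arguments.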

    at::Tensor & clamp_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto min_meta = to_meta(min);
        auto max_meta = to_meta(max);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::clamp_Tensor_out::call(self_meta, min_meta, max_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      ::std::optional<at::Tensor> min_;
      if (at::functionalization::impl::isFunctionalTensor(min)) {
        at::functionalization::impl::sync(min);
        min_ = at::functionalization::impl::from_functional_tensor(min);
      } else {
        min_ = min;
      }
      
      ::std::optional<at::Tensor> max_;
      if (at::functionalization::impl::isFunctionalTensor(max)) {
        at::functionalization::impl::sync(max);
        max_ = at::functionalization::impl::from_functional_tensor(max);
      } else {
        max_ = max;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(min) || at::functionalization::impl::isFunctionalTensor(max))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::clamp_Tensor_out::call(self_, min_, max_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::clamp_Tensor::call(self_, min_, max_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & clamp__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto min_meta = to_meta(min);
        auto max_meta = to_meta(max);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::clamp__Tensor::call(self_meta, min_meta, max_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      ::std::optional<at::Tensor> min_;
      if (at::functionalization::impl::isFunctionalTensor(min)) {
        at::functionalization::impl::sync(min);
        min_ = at::functionalization::impl::from_functional_tensor(min);
      } else {
        min_ = min;
      }
      
      ::std::optional<at::Tensor> max_;
      if (at::functionalization::impl::isFunctionalTensor(max)) {
        at::functionalization::impl::sync(max);
        max_ = at::functionalization::impl::from_functional_tensor(max);
      } else {
        max_ = max;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(min) || at::functionalization::impl::isFunctionalTensor(max))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::clamp__Tensor::call(self_, min_, max_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::clamp_Tensor::call(self_, min_, max_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

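    // out= variant of clamp_max. Note that the meta-tensor reference run is disabled for out=
    // kernels (the `if (false && ...)` guard below never fires); the result is computed through
    // the functional clamp_max and then committed into `out`. Writing into a non-functional
    // `out` from functional inputs is an error unless an XLA tensor is involved.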
    at::Tensor & clamp_max_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & max, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::clamp_max_out::call(self_meta, max, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::clamp_max_out::call(self_, max, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::clamp_max::call(self_, max);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & clamp_max_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & max) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::clamp_max_::call(self_meta, max);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::clamp_max_::call(self_, max);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::clamp_max::call(self_, max);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

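    // Tensor-overload out= variant: in addition to `self`, the `max` tensor is synced/unwrapped
    // and participates in both the XLA device check and the functional-tensor mutation check.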
    at::Tensor & clamp_max_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & max, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto max_meta = to_meta(max);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::clamp_max_Tensor_out::call(self_meta, max_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor max_;
      if (at::functionalization::impl::isFunctionalTensor(max)) {
        at::functionalization::impl::sync(max);
        max_ = at::functionalization::impl::from_functional_tensor(max);
      } else {
        max_ = max;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || max.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(max))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::clamp_max_Tensor_out::call(self_, max_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::clamp_max_Tensor::call(self_, max_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & clamp_max__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & max) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto max_meta = to_meta(max);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::clamp_max__Tensor::call(self_meta, max_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor max_;
      if (at::functionalization::impl::isFunctionalTensor(max)) {
        at::functionalization::impl::sync(max);
        max_ = at::functionalization::impl::from_functional_tensor(max);
      } else {
        max_ = max;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || max.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(max))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::clamp_max__Tensor::call(self_, max_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::clamp_max_Tensor::call(self_, max_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & clamp_min_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & min, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::clamp_min_out::call(self_meta, min, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::clamp_min_out::call(self_, min, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::clamp_min::call(self_, min);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

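    // In-place Scalar overload: `min` is a Scalar, so there are no tensor arguments other than
    // `self`. The "mutating a non-functional tensor" check below is vacuous (`!(false) && (false)`),
    // so non-functional inputs always take the no-op redispatch path.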
    at::Tensor & clamp_min_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & min) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::clamp_min_::call(self_meta, min);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::clamp_min_::call(self_, min);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::clamp_min::call(self_, min);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & clamp_min_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & min, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto min_meta = to_meta(min);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::clamp_min_Tensor_out::call(self_meta, min_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor min_;
      if (at::functionalization::impl::isFunctionalTensor(min)) {
        at::functionalization::impl::sync(min);
        min_ = at::functionalization::impl::from_functional_tensor(min);
      } else {
        min_ = min;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || min.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(min))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::clamp_min_Tensor_out::call(self_, min_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::clamp_min_Tensor::call(self_, min_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & clamp_min__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & min) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto min_meta = to_meta(min);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::clamp_min__Tensor::call(self_meta, min_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor min_;
      if (at::functionalization::impl::isFunctionalTensor(min)) {
        at::functionalization::impl::sync(min);
        min_ = at::functionalization::impl::from_functional_tensor(min);
      } else {
        min_ = min;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || min.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(min))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::clamp_min__Tensor::call(self_, min_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::clamp_min_Tensor::call(self_, min_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & clip_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::clip_out::call(self_meta, min, max, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::clip_out::call(self_, min, max, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::clip::call(self_, min, max);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & clip_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::clip_::call(self_meta, min, max);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::clip_::call(self_, min, max);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::clip::call(self_, min, max);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & clip_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto min_meta = to_meta(min);
        auto max_meta = to_meta(max);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::clip_Tensor_out::call(self_meta, min_meta, max_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      ::std::optional<at::Tensor> min_;
      if (at::functionalization::impl::isFunctionalTensor(min)) {
        at::functionalization::impl::sync(min);
        min_ = at::functionalization::impl::from_functional_tensor(min);
      } else {
        min_ = min;
      }
      
      ::std::optional<at::Tensor> max_;
      if (at::functionalization::impl::isFunctionalTensor(max)) {
        at::functionalization::impl::sync(max);
        max_ = at::functionalization::impl::from_functional_tensor(max);
      } else {
        max_ = max;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(min) || at::functionalization::impl::isFunctionalTensor(max))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::clip_Tensor_out::call(self_, min_, max_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::clip_Tensor::call(self_, min_, max_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & clip__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto min_meta = to_meta(min);
        auto max_meta = to_meta(max);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::clip__Tensor::call(self_meta, min_meta, max_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      ::std::optional<at::Tensor> min_;
      if (at::functionalization::impl::isFunctionalTensor(min)) {
        at::functionalization::impl::sync(min);
        min_ = at::functionalization::impl::from_functional_tensor(min);
      } else {
        min_ = min;
      }
      
      ::std::optional<at::Tensor> max_;
      if (at::functionalization::impl::isFunctionalTensor(max)) {
        at::functionalization::impl::sync(max);
        max_ = at::functionalization::impl::from_functional_tensor(max);
      } else {
        max_ = max;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(min) || at::functionalization::impl::isFunctionalTensor(max))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::clip__Tensor::call(self_, min_, max_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::clip_Tensor::call(self_, min_, max_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

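    // complex.out: a pure out= op whose inputs (`real`, `imag`) are only read. The kernel
    // computes at::_ops::complex on the unwrapped inputs and commits the result into `out`.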
    at::Tensor & complex_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & real, const at::Tensor & imag, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto real_meta = to_meta(real);
        auto imag_meta = to_meta(imag);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::complex_out::call(real_meta, imag_meta, out_meta);
      }
      
      at::Tensor real_;
      if (at::functionalization::impl::isFunctionalTensor(real)) {
        at::functionalization::impl::sync(real);
        real_ = at::functionalization::impl::from_functional_tensor(real);
      } else {
        real_ = real;
      }
      
      at::Tensor imag_;
      if (at::functionalization::impl::isFunctionalTensor(imag)) {
        at::functionalization::impl::sync(imag);
        imag_ = at::functionalization::impl::from_functional_tensor(imag);
      } else {
        imag_ = imag;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || real.device().type() == c10::DeviceType::XLA || imag.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(real) || at::functionalization::impl::isFunctionalTensor(imag))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::complex_out::call(real_, imag_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::complex::call(real_, imag_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & polar_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & abs, const at::Tensor & angle, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto abs_meta = to_meta(abs);
        auto angle_meta = to_meta(angle);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::polar_out::call(abs_meta, angle_meta, out_meta);
      }
      
      at::Tensor abs_;
      if (at::functionalization::impl::isFunctionalTensor(abs)) {
        at::functionalization::impl::sync(abs);
        abs_ = at::functionalization::impl::from_functional_tensor(abs);
      } else {
        abs_ = abs;
      }
      
      at::Tensor angle_;
      if (at::functionalization::impl::isFunctionalTensor(angle)) {
        at::functionalization::impl::sync(angle);
        angle_ = at::functionalization::impl::from_functional_tensor(angle);
      } else {
        angle_ = angle;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || abs.device().type() == c10::DeviceType::XLA || angle.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(abs) || at::functionalization::impl::isFunctionalTensor(angle))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::polar_out::call(abs_, angle_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::polar::call(abs_, angle_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & constant_pad_nd_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::constant_pad_nd_out::call(self_meta, pad, value, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::constant_pad_nd_out::call(self_, pad, value, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::constant_pad_nd::call(self_, pad, value);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

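    // convolution.out: only Tensor and optional<Tensor> arguments (input, weight, bias, out) are
    // synced/unwrapped; non-tensor arguments such as stride, padding, dilation, transposed,
    // output_padding and groups are forwarded to the redispatched call unchanged.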
    at::Tensor & convolution_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto weight_meta = to_meta(weight);
        auto bias_meta = to_meta(bias);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::convolution_out::call(input_meta, weight_meta, bias_meta, stride, padding, dilation, transposed, output_padding, groups, out_meta);
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      ::std::optional<at::Tensor> bias_;
      if (at::functionalization::impl::isFunctionalTensor(bias)) {
        at::functionalization::impl::sync(bias);
        bias_ = at::functionalization::impl::from_functional_tensor(bias);
      } else {
        bias_ = bias;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::convolution_out::call(input_, weight_, bias_, stride, padding, dilation, transposed, output_padding, groups, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::convolution::call(input_, weight_, bias_, stride, padding, dilation, transposed, output_padding, groups);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

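    // convolution_backward.out: a multi-output out= kernel. Each of out0/out1/out2 is unwrapped,
    // and after computing the functional convolution_backward, each output gets its own
    // replace_/commit_update/sync/propagate_xla_data_direct sequence before the tuple of the
    // original references is returned.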
    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalSymIntArrayRef bias_sizes, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto input_meta = to_meta(input);
        auto weight_meta = to_meta(weight);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        auto out2_meta = to_meta(out2);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::convolution_backward_out::call(grad_output_meta, input_meta, weight_meta, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask, out0_meta, out1_meta, out2_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      
      at::Tensor out2_;
      if (at::functionalization::impl::isFunctionalTensor(out2)) {
        at::functionalization::impl::sync(out2);
        out2_ = at::functionalization::impl::from_functional_tensor(out2);
      } else {
        out2_ = out2;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || input.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::convolution_backward_out::call(grad_output_, input_, weight_, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask, out0_, out1_, out2_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::convolution_backward::call(grad_output_, input_, weight_, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask);
        }
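        // Write the functional results back into the out tensors. Roughly: replace_() swaps
        // the value held by each FunctionalTensorWrapper, commit_update() and sync() make the
        // mutation visible through any outstanding aliases/views, and
        // propagate_xla_data_direct() forwards backend-specific (e.g. XLA) data from the old
        // inner tensor to the updated one.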
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        auto out2_inner = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(out2);
        at::functionalization::impl::sync(out2);
        auto out2_inner_updated = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::propagate_xla_data_direct(out2_inner, out2_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
      }
    }

    at::Tensor & convolution_overrideable_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, at::Tensor & out) {
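      // This wrapper (and every out= wrapper in this file) follows the same generated shape:
      // (1) optionally replay the op on meta tensors to surface shape/dtype errors early,
      // (2) unwrap any functional-tensor arguments via sync() + from_functional_tensor(),
      // (3) if the out argument is not a functional tensor, either raise (case 1) or skip
      //     functionalization and redispatch to the original out= op (case 2),
      // (4) otherwise call the purely functional variant of the op and write the result
      //     back into the functional out tensor.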
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto weight_meta = to_meta(weight);
        auto bias_meta = to_meta(bias);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::convolution_overrideable_out::call(input_meta, weight_meta, bias_meta, stride, padding, dilation, transposed, output_padding, groups, out_meta);
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      ::std::optional<at::Tensor> bias_;
      if (at::functionalization::impl::isFunctionalTensor(bias)) {
        at::functionalization::impl::sync(bias);
        bias_ = at::functionalization::impl::from_functional_tensor(bias);
      } else {
        bias_ = bias;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
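      // Mutation-safety check: if `out` is not a functional tensor but one of the inputs is,
      // the wrapper refuses to write a functionalized result into a plain tensor (case 1
      // below), with an exemption for XLA tensors; if nothing involved is functional, the
      // call is simply forwarded to the original out= kernel (case 2).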
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::convolution_overrideable_out::call(input_, weight_, bias_, stride, padding, dilation, transposed, output_padding, groups, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::convolution_overrideable::call(input_, weight_, bias_, stride, padding, dilation, transposed, output_padding, groups);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_overrideable_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto input_meta = to_meta(input);
        auto weight_meta = to_meta(weight);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        auto out2_meta = to_meta(out2);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::convolution_backward_overrideable_out::call(grad_output_meta, input_meta, weight_meta, stride, padding, dilation, transposed, output_padding, groups, output_mask, out0_meta, out1_meta, out2_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      
      at::Tensor out2_;
      if (at::functionalization::impl::isFunctionalTensor(out2)) {
        at::functionalization::impl::sync(out2);
        out2_ = at::functionalization::impl::from_functional_tensor(out2);
      } else {
        out2_ = out2;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || input.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::convolution_backward_overrideable_out::call(grad_output_, input_, weight_, stride, padding, dilation, transposed, output_padding, groups, output_mask, out0_, out1_, out2_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::convolution_backward_overrideable::call(grad_output_, input_, weight_, stride, padding, dilation, transposed, output_padding, groups, output_mask);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        auto out2_inner = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(out2);
        at::functionalization::impl::sync(out2);
        auto out2_inner_updated = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::propagate_xla_data_direct(out2_inner, out2_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
      }
    }

    at::Tensor & _convolution_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto weight_meta = to_meta(weight);
        auto bias_meta = to_meta(bias);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_convolution_out::call(input_meta, weight_meta, bias_meta, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32, out_meta);
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      ::std::optional<at::Tensor> bias_;
      if (at::functionalization::impl::isFunctionalTensor(bias)) {
        at::functionalization::impl::sync(bias);
        bias_ = at::functionalization::impl::from_functional_tensor(bias);
      } else {
        bias_ = bias;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_convolution_out::call(input_, weight_, bias_, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_convolution::call(input_, weight_, bias_, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & conv_tbc_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto weight_meta = to_meta(weight);
        auto bias_meta = to_meta(bias);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::conv_tbc_out::call(self_meta, weight_meta, bias_meta, pad, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      at::Tensor bias_;
      if (at::functionalization::impl::isFunctionalTensor(bias)) {
        at::functionalization::impl::sync(bias);
        bias_ = at::functionalization::impl::from_functional_tensor(bias);
      } else {
        bias_ = bias;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA || bias.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::conv_tbc_out::call(self_, weight_, bias_, pad, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::conv_tbc::call(self_, weight_, bias_, pad);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, bool non_blocking, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto src_meta = to_meta(src);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::copy_out::call(self_meta, src_meta, non_blocking, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor src_;
      if (at::functionalization::impl::isFunctionalTensor(src)) {
        at::functionalization::impl::sync(src);
        src_ = at::functionalization::impl::from_functional_tensor(src);
      } else {
        src_ = src;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || src.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(src))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::copy_out::call(self_, src_, non_blocking, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::copy::call(self_, src_, non_blocking);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & copy_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & src, bool non_blocking) {
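      // copy_() is the prototypical in-place op for this pass: the mutation is re-expressed
      // through the functional at::copy(self, src) and the result is committed back into
      // self's wrapper. A rough usage sketch (illustrative only; it assumes `self` was
      // wrapped into a functional tensor beforehand, e.g. via
      // at::functionalization::impl::to_functional_tensor()):
      //
      //   at::Tensor a = at::zeros({3});
      //   at::Tensor b = at::ones({3});
      //   auto fa = at::functionalization::impl::to_functional_tensor(a);
      //   fa.copy_(b);                                            // lands in the kernel below
      //   at::functionalization::impl::sync(fa);
      //   at::Tensor result = at::functionalization::impl::from_functional_tensor(fa);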
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto src_meta = to_meta(src);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::copy_::call(self_meta, src_meta, non_blocking);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor src_;
      if (at::functionalization::impl::isFunctionalTensor(src)) {
        at::functionalization::impl::sync(src);
        src_ = at::functionalization::impl::from_functional_tensor(src);
      } else {
        src_ = src;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || src.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(src))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::copy_::call(self_, src_, non_blocking);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::copy::call(self_, src_, non_blocking);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & _copy_from_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & dst, bool non_blocking, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto dst_meta = to_meta(dst);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_copy_from_out::call(self_meta, dst_meta, non_blocking, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor dst_;
      if (at::functionalization::impl::isFunctionalTensor(dst)) {
        at::functionalization::impl::sync(dst);
        dst_ = at::functionalization::impl::from_functional_tensor(dst);
      } else {
        dst_ = dst;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || dst.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(dst))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_copy_from_out::call(self_, dst_, non_blocking, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_copy_from::call(self_, dst_, non_blocking);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _copy_from_and_resize_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & dst, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto dst_meta = to_meta(dst);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_copy_from_and_resize_out::call(self_meta, dst_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor dst_;
      if (at::functionalization::impl::isFunctionalTensor(dst)) {
        at::functionalization::impl::sync(dst);
        dst_ = at::functionalization::impl::from_functional_tensor(dst);
      } else {
        dst_ = dst;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || dst.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(dst))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_copy_from_and_resize_out::call(self_, dst_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_copy_from_and_resize::call(self_, dst_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & cos_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::cos_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::cos_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::cos::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & cos_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
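      // For true in-place ops such as cos_() the meta-reference block below is compiled in
      // (the leading literal is `true`), so shape and dtype errors from the original in-place
      // kernel are reproduced on meta tensors before the op is converted to its functional
      // form; for the out= overloads above, the same block is compiled out (`false && ...`).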
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::cos_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::cos_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::cos::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & cosh_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::cosh_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::cosh_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::cosh::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & cosh_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::cosh_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::cosh_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::cosh::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & count_nonzero_out_dim_IntList_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::count_nonzero_dim_IntList_out::call(self_meta, dim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::count_nonzero_dim_IntList_out::call(self_, dim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::count_nonzero_dim_IntList::call(self_, dim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & count_nonzero_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<int64_t> dim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::count_nonzero_out::call(self_meta, dim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::count_nonzero_out::call(self_, dim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::count_nonzero::call(self_, dim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & cudnn_affine_grid_generator_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto theta_meta = to_meta(theta);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::cudnn_affine_grid_generator_out::call(theta_meta, N, C, H, W, out_meta);
      }
      
      at::Tensor theta_;
      if (at::functionalization::impl::isFunctionalTensor(theta)) {
        at::functionalization::impl::sync(theta);
        theta_ = at::functionalization::impl::from_functional_tensor(theta);
      } else {
        theta_ = theta;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || theta.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(theta))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::cudnn_affine_grid_generator_out::call(theta_, N, C, H, W, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::cudnn_affine_grid_generator::call(theta_, N, C, H, W);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & cudnn_affine_grid_generator_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_meta = to_meta(grad);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::cudnn_affine_grid_generator_backward_out::call(grad_meta, N, C, H, W, out_meta);
      }
      
      at::Tensor grad_;
      if (at::functionalization::impl::isFunctionalTensor(grad)) {
        at::functionalization::impl::sync(grad);
        grad_ = at::functionalization::impl::from_functional_tensor(grad);
      } else {
        grad_ = grad;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::cudnn_affine_grid_generator_backward_out::call(grad_, N, C, H, W, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::cudnn_affine_grid_generator_backward::call(grad_, N, C, H, W);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> cudnn_batch_norm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
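      // cudnn_batch_norm.out takes several optional tensor arguments (bias, running_mean,
      // running_var). They are unwrapped exactly like plain tensors, via the
      // ::std::optional<at::Tensor> overloads of isFunctionalTensor(), sync() and
      // from_functional_tensor().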
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto weight_meta = to_meta(weight);
        auto bias_meta = to_meta(bias);
        auto running_mean_meta = to_meta(running_mean);
        auto running_var_meta = to_meta(running_var);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        auto out2_meta = to_meta(out2);
        auto out3_meta = to_meta(out3);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::cudnn_batch_norm_out::call(input_meta, weight_meta, bias_meta, running_mean_meta, running_var_meta, training, exponential_average_factor, epsilon, out0_meta, out1_meta, out2_meta, out3_meta);
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      ::std::optional<at::Tensor> bias_;
      if (at::functionalization::impl::isFunctionalTensor(bias)) {
        at::functionalization::impl::sync(bias);
        bias_ = at::functionalization::impl::from_functional_tensor(bias);
      } else {
        bias_ = bias;
      }
      
      ::std::optional<at::Tensor> running_mean_;
      if (at::functionalization::impl::isFunctionalTensor(running_mean)) {
        at::functionalization::impl::sync(running_mean);
        running_mean_ = at::functionalization::impl::from_functional_tensor(running_mean);
      } else {
        running_mean_ = running_mean;
      }
      
      ::std::optional<at::Tensor> running_var_;
      if (at::functionalization::impl::isFunctionalTensor(running_var)) {
        at::functionalization::impl::sync(running_var);
        running_var_ = at::functionalization::impl::from_functional_tensor(running_var);
      } else {
        running_var_ = running_var;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      
      at::Tensor out2_;
      if (at::functionalization::impl::isFunctionalTensor(out2)) {
        at::functionalization::impl::sync(out2);
        out2_ = at::functionalization::impl::from_functional_tensor(out2);
      } else {
        out2_ = out2;
      }
      
      at::Tensor out3_;
      if (at::functionalization::impl::isFunctionalTensor(out3)) {
        at::functionalization::impl::sync(out3);
        out3_ = at::functionalization::impl::from_functional_tensor(out3);
      } else {
        out3_ = out3;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2) && at::functionalization::impl::isFunctionalTensor(out3))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias) || at::functionalization::impl::isFunctionalTensor(running_mean) || at::functionalization::impl::isFunctionalTensor(running_var))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::cudnn_batch_norm_out::call(input_, weight_, bias_, running_mean_, running_var_, training, exponential_average_factor, epsilon, out0_, out1_, out2_, out3_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::cudnn_batch_norm::call(input_, weight_, bias_, running_mean_, running_var_, training, exponential_average_factor, epsilon);
        }
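        // Write the functional cudnn_batch_norm results back into out0..out3
        // (replace_, commit_update, sync) and propagate any XLA data to the updated inner tensors.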
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        auto out2_inner = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(out2);
        at::functionalization::impl::sync(out2);
        auto out2_inner_updated = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::propagate_xla_data_direct(out2_inner, out2_inner_updated);
        auto out3_inner = at::functionalization::impl::from_functional_tensor(out3);
        at::functionalization::impl::replace_(out3, std::get<3>(tmp_output));
        at::functionalization::impl::commit_update(out3);
        at::functionalization::impl::sync(out3);
        auto out3_inner_updated = at::functionalization::impl::from_functional_tensor(out3);
        at::functionalization::impl::propagate_xla_data_direct(out3_inner, out3_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3);
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> cudnn_batch_norm_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::optional<at::Tensor> & save_var, double epsilon, const at::Tensor & reserveSpace, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto grad_output_meta = to_meta(grad_output);
        auto weight_meta = to_meta(weight);
        auto running_mean_meta = to_meta(running_mean);
        auto running_var_meta = to_meta(running_var);
        auto save_mean_meta = to_meta(save_mean);
        auto save_var_meta = to_meta(save_var);
        auto reserveSpace_meta = to_meta(reserveSpace);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        auto out2_meta = to_meta(out2);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::cudnn_batch_norm_backward_out::call(input_meta, grad_output_meta, weight_meta, running_mean_meta, running_var_meta, save_mean_meta, save_var_meta, epsilon, reserveSpace_meta, out0_meta, out1_meta, out2_meta);
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      ::std::optional<at::Tensor> running_mean_;
      if (at::functionalization::impl::isFunctionalTensor(running_mean)) {
        at::functionalization::impl::sync(running_mean);
        running_mean_ = at::functionalization::impl::from_functional_tensor(running_mean);
      } else {
        running_mean_ = running_mean;
      }
      
      ::std::optional<at::Tensor> running_var_;
      if (at::functionalization::impl::isFunctionalTensor(running_var)) {
        at::functionalization::impl::sync(running_var);
        running_var_ = at::functionalization::impl::from_functional_tensor(running_var);
      } else {
        running_var_ = running_var;
      }
      
      ::std::optional<at::Tensor> save_mean_;
      if (at::functionalization::impl::isFunctionalTensor(save_mean)) {
        at::functionalization::impl::sync(save_mean);
        save_mean_ = at::functionalization::impl::from_functional_tensor(save_mean);
      } else {
        save_mean_ = save_mean;
      }
      
      ::std::optional<at::Tensor> save_var_;
      if (at::functionalization::impl::isFunctionalTensor(save_var)) {
        at::functionalization::impl::sync(save_var);
        save_var_ = at::functionalization::impl::from_functional_tensor(save_var);
      } else {
        save_var_ = save_var;
      }
      
      at::Tensor reserveSpace_;
      if (at::functionalization::impl::isFunctionalTensor(reserveSpace)) {
        at::functionalization::impl::sync(reserveSpace);
        reserveSpace_ = at::functionalization::impl::from_functional_tensor(reserveSpace);
      } else {
        reserveSpace_ = reserveSpace;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      
      at::Tensor out2_;
      if (at::functionalization::impl::isFunctionalTensor(out2)) {
        at::functionalization::impl::sync(out2);
        out2_ = at::functionalization::impl::from_functional_tensor(out2);
      } else {
        out2_ = out2;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA || grad_output.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA || reserveSpace.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(running_mean) || at::functionalization::impl::isFunctionalTensor(running_var) || at::functionalization::impl::isFunctionalTensor(save_mean) || at::functionalization::impl::isFunctionalTensor(save_var) || at::functionalization::impl::isFunctionalTensor(reserveSpace))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::cudnn_batch_norm_backward_out::call(input_, grad_output_, weight_, running_mean_, running_var_, save_mean_, save_var_, epsilon, reserveSpace_, out0_, out1_, out2_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::cudnn_batch_norm_backward::call(input_, grad_output_, weight_, running_mean_, running_var_, save_mean_, save_var_, epsilon, reserveSpace_);
        }
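        // Write the functional cudnn_batch_norm_backward results back into out0..out2
        // (replace_, commit_update, sync) and propagate any XLA data to the updated inner tensors.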
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        auto out2_inner = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(out2);
        at::functionalization::impl::sync(out2);
        auto out2_inner_updated = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::propagate_xla_data_direct(out2_inner, out2_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
      }
    }

    at::Tensor & cudnn_convolution_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto weight_meta = to_meta(weight);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::cudnn_convolution_out::call(self_meta, weight_meta, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::cudnn_convolution_out::call(self_, weight_, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::cudnn_convolution::call(self_, weight_, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32);
        }
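        // Write the functional cudnn_convolution result back into the preallocated `out` wrapper
        // (replace_, commit_update, sync) and carry any XLA data over to the updated inner tensor.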
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & cudnn_convolution_transpose_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto weight_meta = to_meta(weight);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::cudnn_convolution_transpose_out::call(self_meta, weight_meta, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::cudnn_convolution_transpose_out::call(self_, weight_, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::cudnn_convolution_transpose::call(self_, weight_, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32);
        }
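        // Write the functional cudnn_convolution_transpose result back into the preallocated `out`
        // wrapper (replace_, commit_update, sync) and carry any XLA data over to the updated inner tensor.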
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _mps_convolution_transpose_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto weight_meta = to_meta(weight);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_mps_convolution_transpose_out::call(self_meta, weight_meta, padding, output_padding, stride, dilation, groups, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_mps_convolution_transpose_out::call(self_, weight_, padding, output_padding, stride, dilation, groups, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_mps_convolution_transpose::call(self_, weight_, padding, output_padding, stride, dilation, groups);
        }
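        // Write the functional _mps_convolution_transpose result back into the preallocated `out`
        // wrapper (replace_, commit_update, sync) and carry any XLA data over to the updated inner tensor.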
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> mps_convolution_transpose_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto grad_output_meta = to_meta(grad_output);
        auto weight_meta = to_meta(weight);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::mps_convolution_transpose_backward_out::call(self_meta, grad_output_meta, weight_meta, padding, output_padding, stride, dilation, groups, output_mask, out0_meta, out1_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || grad_output.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(weight))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::mps_convolution_transpose_backward_out::call(self_, grad_output_, weight_, padding, output_padding, stride, dilation, groups, output_mask, out0_, out1_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::mps_convolution_transpose_backward::call(self_, grad_output_, weight_, padding, output_padding, stride, dilation, groups, output_mask);
        }
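        // Write the functional mps_convolution_transpose_backward results back into out0 and out1
        // (replace_, commit_update, sync) and propagate any XLA data to the updated inner tensors.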
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
      }
    }

    at::Tensor & cudnn_convolution_relu_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto weight_meta = to_meta(weight);
        auto bias_meta = to_meta(bias);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::cudnn_convolution_relu_out::call(self_meta, weight_meta, bias_meta, stride, padding, dilation, groups, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      ::std::optional<at::Tensor> bias_;
      if (at::functionalization::impl::isFunctionalTensor(bias)) {
        at::functionalization::impl::sync(bias);
        bias_ = at::functionalization::impl::from_functional_tensor(bias);
      } else {
        bias_ = bias;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::cudnn_convolution_relu_out::call(self_, weight_, bias_, stride, padding, dilation, groups, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::cudnn_convolution_relu::call(self_, weight_, bias_, stride, padding, dilation, groups);
        }
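        // Write the functional cudnn_convolution_relu result back into the preallocated `out` wrapper
        // (replace_, commit_update, sync) and carry any XLA data over to the updated inner tensor.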
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & cudnn_convolution_add_relu_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const ::std::optional<at::Scalar> & alpha, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto weight_meta = to_meta(weight);
        auto z_meta = to_meta(z);
        auto bias_meta = to_meta(bias);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::cudnn_convolution_add_relu_out::call(self_meta, weight_meta, z_meta, alpha, bias_meta, stride, padding, dilation, groups, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      at::Tensor z_;
      if (at::functionalization::impl::isFunctionalTensor(z)) {
        at::functionalization::impl::sync(z);
        z_ = at::functionalization::impl::from_functional_tensor(z);
      } else {
        z_ = z;
      }
      
      ::std::optional<at::Tensor> bias_;
      if (at::functionalization::impl::isFunctionalTensor(bias)) {
        at::functionalization::impl::sync(bias);
        bias_ = at::functionalization::impl::from_functional_tensor(bias);
      } else {
        bias_ = bias;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA || z.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(z) || at::functionalization::impl::isFunctionalTensor(bias))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::cudnn_convolution_add_relu_out::call(self_, weight_, z_, alpha, bias_, stride, padding, dilation, groups, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::cudnn_convolution_add_relu::call(self_, weight_, z_, alpha, bias_, stride, padding, dilation, groups);
        }
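        // Write the functional cudnn_convolution_add_relu result back into the preallocated `out`
        // wrapper (replace_, commit_update, sync) and carry any XLA data over to the updated inner tensor.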
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & cudnn_grid_sampler_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grid, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto grid_meta = to_meta(grid);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::cudnn_grid_sampler_out::call(self_meta, grid_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor grid_;
      if (at::functionalization::impl::isFunctionalTensor(grid)) {
        at::functionalization::impl::sync(grid);
        grid_ = at::functionalization::impl::from_functional_tensor(grid);
      } else {
        grid_ = grid;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || grid.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(grid))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::cudnn_grid_sampler_out::call(self_, grid_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::cudnn_grid_sampler::call(self_, grid_);
        }
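        // Write the functional cudnn_grid_sampler result back into the preallocated `out` wrapper
        // (replace_, commit_update, sync) and carry any XLA data over to the updated inner tensor.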
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> cudnn_grid_sampler_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grid, const at::Tensor & grad_output, at::Tensor & out0, at::Tensor & out1) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto grid_meta = to_meta(grid);
        auto grad_output_meta = to_meta(grad_output);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::cudnn_grid_sampler_backward_out::call(self_meta, grid_meta, grad_output_meta, out0_meta, out1_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor grid_;
      if (at::functionalization::impl::isFunctionalTensor(grid)) {
        at::functionalization::impl::sync(grid);
        grid_ = at::functionalization::impl::from_functional_tensor(grid);
      } else {
        grid_ = grid;
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || grid.device().type() == c10::DeviceType::XLA || grad_output.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(grid) || at::functionalization::impl::isFunctionalTensor(grad_output))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::cudnn_grid_sampler_backward_out::call(self_, grid_, grad_output_, out0_, out1_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::cudnn_grid_sampler_backward::call(self_, grid_, grad_output_);
        }
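        // Write the functional cudnn_grid_sampler_backward results back into out0 and out1
        // (replace_, commit_update, sync) and propagate any XLA data to the updated inner tensors.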
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> cummax_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & values, at::Tensor & indices) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto values_meta = to_meta(values);
        auto indices_meta = to_meta(indices);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::cummax_out::call(self_meta, dim, values_meta, indices_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor values_;
      if (at::functionalization::impl::isFunctionalTensor(values)) {
        at::functionalization::impl::sync(values);
        values_ = at::functionalization::impl::from_functional_tensor(values);
      } else {
        values_ = values;
      }
      
      at::Tensor indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(values) && at::functionalization::impl::isFunctionalTensor(indices))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::cummax_out::call(self_, dim, values_, indices_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::cummax::call(self_, dim);
        }
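        // Write the functional cummax results back into `values` and `indices`
        // (replace_, commit_update, sync) and propagate any XLA data to the updated inner tensors.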
        auto values_inner = at::functionalization::impl::from_functional_tensor(values);
        at::functionalization::impl::replace_(values, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(values);
        at::functionalization::impl::sync(values);
        auto values_inner_updated = at::functionalization::impl::from_functional_tensor(values);
        at::functionalization::impl::propagate_xla_data_direct(values_inner, values_inner_updated);
        auto indices_inner = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::replace_(indices, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(indices);
        at::functionalization::impl::sync(indices);
        auto indices_inner_updated = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::propagate_xla_data_direct(indices_inner, indices_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> cummax_out_dimname_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, at::Tensor & values, at::Tensor & indices) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto values_meta = to_meta(values);
        auto indices_meta = to_meta(indices);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::cummax_dimname_out::call(self_meta, dim, values_meta, indices_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor values_;
      if (at::functionalization::impl::isFunctionalTensor(values)) {
        at::functionalization::impl::sync(values);
        values_ = at::functionalization::impl::from_functional_tensor(values);
      } else {
        values_ = values;
      }
      
      at::Tensor indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(values) && at::functionalization::impl::isFunctionalTensor(indices))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::cummax_dimname_out::call(self_, dim, values_, indices_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::cummax_dimname::call(self_, dim);
        }
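        // Write the functional cummax (dimname overload) results back into `values` and `indices`
        // (replace_, commit_update, sync) and propagate any XLA data to the updated inner tensors.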
        auto values_inner = at::functionalization::impl::from_functional_tensor(values);
        at::functionalization::impl::replace_(values, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(values);
        at::functionalization::impl::sync(values);
        auto values_inner_updated = at::functionalization::impl::from_functional_tensor(values);
        at::functionalization::impl::propagate_xla_data_direct(values_inner, values_inner_updated);
        auto indices_inner = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::replace_(indices, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(indices);
        at::functionalization::impl::sync(indices);
        auto indices_inner_updated = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::propagate_xla_data_direct(indices_inner, indices_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> cummin_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & values, at::Tensor & indices) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto values_meta = to_meta(values);
        auto indices_meta = to_meta(indices);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::cummin_out::call(self_meta, dim, values_meta, indices_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor values_;
      if (at::functionalization::impl::isFunctionalTensor(values)) {
        at::functionalization::impl::sync(values);
        values_ = at::functionalization::impl::from_functional_tensor(values);
      } else {
        values_ = values;
      }
      
      at::Tensor indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(values) && at::functionalization::impl::isFunctionalTensor(indices))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::cummin_out::call(self_, dim, values_, indices_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::cummin::call(self_, dim);
        }
        auto values_inner = at::functionalization::impl::from_functional_tensor(values);
        at::functionalization::impl::replace_(values, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(values);
        at::functionalization::impl::sync(values);
        auto values_inner_updated = at::functionalization::impl::from_functional_tensor(values);
        at::functionalization::impl::propagate_xla_data_direct(values_inner, values_inner_updated);
        auto indices_inner = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::replace_(indices, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(indices);
        at::functionalization::impl::sync(indices);
        auto indices_inner_updated = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::propagate_xla_data_direct(indices_inner, indices_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> cummin_out_dimname_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, at::Tensor & values, at::Tensor & indices) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto values_meta = to_meta(values);
        auto indices_meta = to_meta(indices);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::cummin_dimname_out::call(self_meta, dim, values_meta, indices_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor values_;
      if (at::functionalization::impl::isFunctionalTensor(values)) {
        at::functionalization::impl::sync(values);
        values_ = at::functionalization::impl::from_functional_tensor(values);
      } else {
        values_ = values;
      }
      
      at::Tensor indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(values) && at::functionalization::impl::isFunctionalTensor(indices))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::cummin_dimname_out::call(self_, dim, values_, indices_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::cummin_dimname::call(self_, dim);
        }
        auto values_inner = at::functionalization::impl::from_functional_tensor(values);
        at::functionalization::impl::replace_(values, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(values);
        at::functionalization::impl::sync(values);
        auto values_inner_updated = at::functionalization::impl::from_functional_tensor(values);
        at::functionalization::impl::propagate_xla_data_direct(values_inner, values_inner_updated);
        auto indices_inner = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::replace_(indices, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(indices);
        at::functionalization::impl::sync(indices);
        auto indices_inner_updated = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::propagate_xla_data_direct(indices_inner, indices_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
      }
    }

    at::Tensor & cumprod_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::cumprod_out::call(self_meta, dim, dtype, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::cumprod_out::call(self_, dim, dtype, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::cumprod::call(self_, dim, dtype);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

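    // For true in-place ops such as cumprod_ below, the meta-reference block is
    // enabled (its condition reduces to !disable_meta_reference()): the op is
    // first replayed on meta tensors so that shape/dtype errors surface exactly
    // as they would for the eager in-place call, before the functional
    // at::_ops::cumprod result is committed back into self.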
    at::Tensor & cumprod_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::cumprod_::call(self_meta, dim, dtype);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::cumprod_::call(self_, dim, dtype);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::cumprod::call(self_, dim, dtype);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & cumprod_out_dimname_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::cumprod_dimname_out::call(self_meta, dim, dtype, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::cumprod_dimname_out::call(self_, dim, dtype, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::cumprod_dimname::call(self_, dim, dtype);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & cumprod__dimname(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::cumprod__dimname::call(self_meta, dim, dtype);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::cumprod__dimname::call(self_, dim, dtype);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::cumprod_dimname::call(self_, dim, dtype);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & cumsum_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::cumsum_out::call(self_meta, dim, dtype, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::cumsum_out::call(self_, dim, dtype, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::cumsum::call(self_, dim, dtype);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & cumsum_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::cumsum_::call(self_meta, dim, dtype);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::cumsum_::call(self_, dim, dtype);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::cumsum::call(self_, dim, dtype);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & cumsum_out_dimname_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::cumsum_dimname_out::call(self_meta, dim, dtype, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::cumsum_dimname_out::call(self_, dim, dtype, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::cumsum_dimname::call(self_, dim, dtype);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & cumsum__dimname(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::cumsum__dimname::call(self_meta, dim, dtype);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::cumsum__dimname::call(self_, dim, dtype);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::cumsum_dimname::call(self_, dim, dtype);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

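    // Ops with multiple out arguments (e.g. _ctc_loss.out below) repeat the same
    // unwrap/commit sequence once per output: each of out0/out1 is unwrapped
    // independently, and each receives its slice of the functional result via
    // std::get<i>(tmp_output) followed by replace_()/commit_update(). The
    // "case 1" assert fires if any functional input would end up mutating a
    // non-functional output.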
    ::std::tuple<at::Tensor &,at::Tensor &> _ctc_loss_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool zero_infinity, at::Tensor & out0, at::Tensor & out1) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto log_probs_meta = to_meta(log_probs);
        auto targets_meta = to_meta(targets);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_ctc_loss_out::call(log_probs_meta, targets_meta, input_lengths, target_lengths, blank, zero_infinity, out0_meta, out1_meta);
      }
      
      at::Tensor log_probs_;
      if (at::functionalization::impl::isFunctionalTensor(log_probs)) {
        at::functionalization::impl::sync(log_probs);
        log_probs_ = at::functionalization::impl::from_functional_tensor(log_probs);
      } else {
        log_probs_ = log_probs;
      }
      
      at::Tensor targets_;
      if (at::functionalization::impl::isFunctionalTensor(targets)) {
        at::functionalization::impl::sync(targets);
        targets_ = at::functionalization::impl::from_functional_tensor(targets);
      } else {
        targets_ = targets;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || log_probs.device().type() == c10::DeviceType::XLA || targets.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(log_probs) || at::functionalization::impl::isFunctionalTensor(targets))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::_ctc_loss_out::call(log_probs_, targets_, input_lengths, target_lengths, blank, zero_infinity, out0_, out1_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_ctc_loss::call(log_probs_, targets_, input_lengths, target_lengths, blank, zero_infinity);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
      }
    }

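    // The Tensor_out overload below differs from _ctc_loss.out only in taking
    // input_lengths/target_lengths as tensors rather than IntArrayRef; those
    // extra tensor arguments are unwrapped with the same
    // isFunctionalTensor()/sync()/from_functional_tensor() pattern as the rest.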
    ::std::tuple<at::Tensor &,at::Tensor &> _ctc_loss_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool zero_infinity, at::Tensor & out0, at::Tensor & out1) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto log_probs_meta = to_meta(log_probs);
        auto targets_meta = to_meta(targets);
        auto input_lengths_meta = to_meta(input_lengths);
        auto target_lengths_meta = to_meta(target_lengths);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_ctc_loss_Tensor_out::call(log_probs_meta, targets_meta, input_lengths_meta, target_lengths_meta, blank, zero_infinity, out0_meta, out1_meta);
      }
      
      at::Tensor log_probs_;
      if (at::functionalization::impl::isFunctionalTensor(log_probs)) {
        at::functionalization::impl::sync(log_probs);
        log_probs_ = at::functionalization::impl::from_functional_tensor(log_probs);
      } else {
        log_probs_ = log_probs;
      }
      
      at::Tensor targets_;
      if (at::functionalization::impl::isFunctionalTensor(targets)) {
        at::functionalization::impl::sync(targets);
        targets_ = at::functionalization::impl::from_functional_tensor(targets);
      } else {
        targets_ = targets;
      }
      
      at::Tensor input_lengths_;
      if (at::functionalization::impl::isFunctionalTensor(input_lengths)) {
        at::functionalization::impl::sync(input_lengths);
        input_lengths_ = at::functionalization::impl::from_functional_tensor(input_lengths);
      } else {
        input_lengths_ = input_lengths;
      }
      
      at::Tensor target_lengths_;
      if (at::functionalization::impl::isFunctionalTensor(target_lengths)) {
        at::functionalization::impl::sync(target_lengths);
        target_lengths_ = at::functionalization::impl::from_functional_tensor(target_lengths);
      } else {
        target_lengths_ = target_lengths;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || log_probs.device().type() == c10::DeviceType::XLA || targets.device().type() == c10::DeviceType::XLA || input_lengths.device().type() == c10::DeviceType::XLA || target_lengths.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(log_probs) || at::functionalization::impl::isFunctionalTensor(targets) || at::functionalization::impl::isFunctionalTensor(input_lengths) || at::functionalization::impl::isFunctionalTensor(target_lengths))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::_ctc_loss_Tensor_out::call(log_probs_, targets_, input_lengths_, target_lengths_, blank, zero_infinity, out0_, out1_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_ctc_loss_Tensor::call(log_probs_, targets_, input_lengths_, target_lengths_, blank, zero_infinity);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
      }
    }

    at::Tensor & _ctc_loss_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_meta = to_meta(grad);
        auto log_probs_meta = to_meta(log_probs);
        auto targets_meta = to_meta(targets);
        auto neg_log_likelihood_meta = to_meta(neg_log_likelihood);
        auto log_alpha_meta = to_meta(log_alpha);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_ctc_loss_backward_out::call(grad_meta, log_probs_meta, targets_meta, input_lengths, target_lengths, neg_log_likelihood_meta, log_alpha_meta, blank, zero_infinity, out_meta);
      }
      
      at::Tensor grad_;
      if (at::functionalization::impl::isFunctionalTensor(grad)) {
        at::functionalization::impl::sync(grad);
        grad_ = at::functionalization::impl::from_functional_tensor(grad);
      } else {
        grad_ = grad;
      }
      
      at::Tensor log_probs_;
      if (at::functionalization::impl::isFunctionalTensor(log_probs)) {
        at::functionalization::impl::sync(log_probs);
        log_probs_ = at::functionalization::impl::from_functional_tensor(log_probs);
      } else {
        log_probs_ = log_probs;
      }
      
      at::Tensor targets_;
      if (at::functionalization::impl::isFunctionalTensor(targets)) {
        at::functionalization::impl::sync(targets);
        targets_ = at::functionalization::impl::from_functional_tensor(targets);
      } else {
        targets_ = targets;
      }
      
      at::Tensor neg_log_likelihood_;
      if (at::functionalization::impl::isFunctionalTensor(neg_log_likelihood)) {
        at::functionalization::impl::sync(neg_log_likelihood);
        neg_log_likelihood_ = at::functionalization::impl::from_functional_tensor(neg_log_likelihood);
      } else {
        neg_log_likelihood_ = neg_log_likelihood;
      }
      
      at::Tensor log_alpha_;
      if (at::functionalization::impl::isFunctionalTensor(log_alpha)) {
        at::functionalization::impl::sync(log_alpha);
        log_alpha_ = at::functionalization::impl::from_functional_tensor(log_alpha);
      } else {
        log_alpha_ = log_alpha;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad.device().type() == c10::DeviceType::XLA || log_probs.device().type() == c10::DeviceType::XLA || targets.device().type() == c10::DeviceType::XLA || neg_log_likelihood.device().type() == c10::DeviceType::XLA || log_alpha.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad) || at::functionalization::impl::isFunctionalTensor(log_probs) || at::functionalization::impl::isFunctionalTensor(targets) || at::functionalization::impl::isFunctionalTensor(neg_log_likelihood) || at::functionalization::impl::isFunctionalTensor(log_alpha))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_ctc_loss_backward_out::call(grad_, log_probs_, targets_, input_lengths, target_lengths, neg_log_likelihood_, log_alpha_, blank, zero_infinity, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_ctc_loss_backward::call(grad_, log_probs_, targets_, input_lengths, target_lengths, neg_log_likelihood_, log_alpha_, blank, zero_infinity);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & diag_embed_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::diag_embed_out::call(self_meta, offset, dim1, dim2, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::diag_embed_out::call(self_, offset, dim1, dim2, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::diag_embed::call(self_, offset, dim1, dim2);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & diagonal_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::diagonal_backward_out::call(grad_output_meta, input_sizes, offset, dim1, dim2, out_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::diagonal_backward_out::call(grad_output_, input_sizes, offset, dim1, dim2, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::diagonal_backward::call(grad_output_, input_sizes, offset, dim1, dim2);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

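    // Optional tensor arguments (prepend/append in diff.out below) are handled
    // with the same pattern, just routed through ::std::optional<at::Tensor>:
    // the isFunctionalTensor()/sync()/from_functional_tensor() calls used here
    // accept the optional wrapper, so an absent tensor simply passes through
    // unchanged.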
    at::Tensor & diff_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n, int64_t dim, const ::std::optional<at::Tensor> & prepend, const ::std::optional<at::Tensor> & append, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto prepend_meta = to_meta(prepend);
        auto append_meta = to_meta(append);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::diff_out::call(self_meta, n, dim, prepend_meta, append_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      ::std::optional<at::Tensor> prepend_;
      if (at::functionalization::impl::isFunctionalTensor(prepend)) {
        at::functionalization::impl::sync(prepend);
        prepend_ = at::functionalization::impl::from_functional_tensor(prepend);
      } else {
        prepend_ = prepend;
      }
      
      ::std::optional<at::Tensor> append_;
      if (at::functionalization::impl::isFunctionalTensor(append)) {
        at::functionalization::impl::sync(append);
        append_ = at::functionalization::impl::from_functional_tensor(append);
      } else {
        append_ = append;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(prepend) || at::functionalization::impl::isFunctionalTensor(append))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::diff_out::call(self_, n, dim, prepend_, append_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::diff::call(self_, n, dim, prepend_, append_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & div_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::div_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::div_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::div_Tensor::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & div__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::div__Tensor::call(self_meta, other_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::div__Tensor::call(self_, other_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::div_Tensor::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & div_out_out_mode(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::div_out_mode::call(self_meta, other_meta, rounding_mode, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::div_out_mode::call(self_, other_, rounding_mode, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::div_Tensor_mode::call(self_, other_, rounding_mode);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & div__Tensor_mode(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::div__Tensor_mode::call(self_meta, other_meta, rounding_mode);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::div__Tensor_mode::call(self_, other_, rounding_mode);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::div_Tensor_mode::call(self_, other_, rounding_mode);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & div_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::div_Scalar_out::call(self_meta, other, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::div_Scalar_out::call(self_, other, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::div_Scalar::call(self_, other);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & div__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::div__Scalar::call(self_meta, other);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::div__Scalar::call(self_, other);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::div_Scalar::call(self_, other);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & div_out_Scalar_mode_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, ::std::optional<c10::string_view> rounding_mode, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::div_Scalar_mode_out::call(self_meta, other, rounding_mode, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::div_Scalar_mode_out::call(self_, other, rounding_mode, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::div_Scalar_mode::call(self_, other, rounding_mode);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & div__Scalar_mode(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other, ::std::optional<c10::string_view> rounding_mode) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::div__Scalar_mode::call(self_meta, other, rounding_mode);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::div__Scalar_mode::call(self_, other, rounding_mode);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::div_Scalar_mode::call(self_, other, rounding_mode);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & divide_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::divide_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::divide_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::divide_Tensor::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & divide__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::divide__Tensor::call(self_meta, other_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::divide__Tensor::call(self_, other_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::divide_Tensor::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & divide_out_out_mode(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::divide_out_mode::call(self_meta, other_meta, rounding_mode, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::divide_out_mode::call(self_, other_, rounding_mode, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::divide_Tensor_mode::call(self_, other_, rounding_mode);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & divide__Tensor_mode(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::divide__Tensor_mode::call(self_meta, other_meta, rounding_mode);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::divide__Tensor_mode::call(self_, other_, rounding_mode);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::divide_Tensor_mode::call(self_, other_, rounding_mode);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & true_divide_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::true_divide_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::true_divide_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::true_divide_Tensor::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & true_divide__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::true_divide__Tensor::call(self_meta, other_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::true_divide__Tensor::call(self_, other_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::true_divide_Tensor::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & dot_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto tensor_meta = to_meta(tensor);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::dot_out::call(self_meta, tensor_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor tensor_;
      if (at::functionalization::impl::isFunctionalTensor(tensor)) {
        at::functionalization::impl::sync(tensor);
        tensor_ = at::functionalization::impl::from_functional_tensor(tensor);
      } else {
        tensor_ = tensor;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || tensor.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(tensor))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::dot_out::call(self_, tensor_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::dot::call(self_, tensor_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & vdot_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::vdot_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::vdot_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::vdot::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & embedding_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto weight_meta = to_meta(weight);
        auto indices_meta = to_meta(indices);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::embedding_out::call(weight_meta, indices_meta, padding_idx, scale_grad_by_freq, sparse, out_meta);
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      at::Tensor indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || weight.device().type() == c10::DeviceType::XLA || indices.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(indices))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::embedding_out::call(weight_, indices_, padding_idx, scale_grad_by_freq, sparse, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::embedding::call(weight_, indices_, padding_idx, scale_grad_by_freq, sparse);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & embedding_dense_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto indices_meta = to_meta(indices);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::embedding_dense_backward_out::call(grad_output_meta, indices_meta, num_weights, padding_idx, scale_grad_by_freq, out_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || indices.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(indices))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::embedding_dense_backward_out::call(grad_output_, indices_, num_weights, padding_idx, scale_grad_by_freq, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::embedding_dense_backward::call(grad_output_, indices_, num_weights, padding_idx, scale_grad_by_freq);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & embedding_renorm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto indices_meta = to_meta(indices);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::embedding_renorm_out::call(self_meta, indices_meta, max_norm, norm_type, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
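      // If `out` is not a functional tensor we cannot stage this mutation: either raise
      // (case 1 below) or fall through to the original out= op (case 2 below).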
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || indices.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(indices))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::embedding_renorm_out::call(self_, indices_, max_norm, norm_type, out_);
         return out;
        }
      } else {
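        // `out` is functional: run the functional variant of the op and write the result
        // back into the wrapper below so subsequent reads observe the mutation.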
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::embedding_renorm::call(self_, indices_, max_norm, norm_type);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & embedding_renorm_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) {
      if (true && !disable_meta_reference()) {
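        // (The constant guard is emitted by the code generator: it is `true` for this
        // inplace variant and `false` for the out= wrappers above, matching the
        // "inplace ops only" note below.)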
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto indices_meta = to_meta(indices);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::embedding_renorm_::call(self_meta, indices_meta, max_norm, norm_type);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || indices.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(indices))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::embedding_renorm_::call(self_, indices_, max_norm, norm_type);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::embedding_renorm::call(self_, indices_, max_norm, norm_type);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _embedding_bag_forward_only_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const ::std::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto weight_meta = to_meta(weight);
        auto indices_meta = to_meta(indices);
        auto offsets_meta = to_meta(offsets);
        auto per_sample_weights_meta = to_meta(per_sample_weights);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        auto out2_meta = to_meta(out2);
        auto out3_meta = to_meta(out3);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_embedding_bag_forward_only_out::call(weight_meta, indices_meta, offsets_meta, scale_grad_by_freq, mode, sparse, per_sample_weights_meta, include_last_offset, padding_idx, out0_meta, out1_meta, out2_meta, out3_meta);
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      at::Tensor indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      
      at::Tensor offsets_;
      if (at::functionalization::impl::isFunctionalTensor(offsets)) {
        at::functionalization::impl::sync(offsets);
        offsets_ = at::functionalization::impl::from_functional_tensor(offsets);
      } else {
        offsets_ = offsets;
      }
      
      ::std::optional<at::Tensor> per_sample_weights_;
      if (at::functionalization::impl::isFunctionalTensor(per_sample_weights)) {
        at::functionalization::impl::sync(per_sample_weights);
        per_sample_weights_ = at::functionalization::impl::from_functional_tensor(per_sample_weights);
      } else {
        per_sample_weights_ = per_sample_weights;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      
      at::Tensor out2_;
      if (at::functionalization::impl::isFunctionalTensor(out2)) {
        at::functionalization::impl::sync(out2);
        out2_ = at::functionalization::impl::from_functional_tensor(out2);
      } else {
        out2_ = out2;
      }
      
      at::Tensor out3_;
      if (at::functionalization::impl::isFunctionalTensor(out3)) {
        at::functionalization::impl::sync(out3);
        out3_ = at::functionalization::impl::from_functional_tensor(out3);
      } else {
        out3_ = out3;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2) && at::functionalization::impl::isFunctionalTensor(out3))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || weight.device().type() == c10::DeviceType::XLA || indices.device().type() == c10::DeviceType::XLA || offsets.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(indices) || at::functionalization::impl::isFunctionalTensor(offsets) || at::functionalization::impl::isFunctionalTensor(per_sample_weights))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::_embedding_bag_forward_only_out::call(weight_, indices_, offsets_, scale_grad_by_freq, mode, sparse, per_sample_weights_, include_last_offset, padding_idx, out0_, out1_, out2_, out3_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_embedding_bag_forward_only::call(weight_, indices_, offsets_, scale_grad_by_freq, mode, sparse, per_sample_weights_, include_last_offset, padding_idx);
        }
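        // Write each of the four outputs back into its functional wrapper in turn.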
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        auto out2_inner = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(out2);
        at::functionalization::impl::sync(out2);
        auto out2_inner_updated = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::propagate_xla_data_direct(out2_inner, out2_inner_updated);
        auto out3_inner = at::functionalization::impl::from_functional_tensor(out3);
        at::functionalization::impl::replace_(out3, std::get<3>(tmp_output));
        at::functionalization::impl::commit_update(out3);
        at::functionalization::impl::sync(out3);
        auto out3_inner_updated = at::functionalization::impl::from_functional_tensor(out3);
        at::functionalization::impl::propagate_xla_data_direct(out3_inner, out3_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3);
      }
    }

    at::Tensor & row_stack_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto tensors_meta = to_meta(tensors);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::row_stack_out::call(tensors_meta, out_meta);
      }
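      // TensorList arguments are handled as an owning std::vector: unwrapped element-wise
      // when functional, copied via .vec() otherwise.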
      
      ::std::vector<at::Tensor> tensors_;
      if (at::functionalization::impl::isFunctionalTensor(tensors)) {
        at::functionalization::impl::sync(tensors);
        tensors_ = at::functionalization::impl::from_functional_tensor(tensors);
      } else {
        tensors_ = tensors.vec();
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(tensors))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::row_stack_out::call(tensors_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::row_stack::call(tensors_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _embedding_bag_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const ::std::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto weight_meta = to_meta(weight);
        auto indices_meta = to_meta(indices);
        auto offsets_meta = to_meta(offsets);
        auto per_sample_weights_meta = to_meta(per_sample_weights);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        auto out2_meta = to_meta(out2);
        auto out3_meta = to_meta(out3);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_embedding_bag_out::call(weight_meta, indices_meta, offsets_meta, scale_grad_by_freq, mode, sparse, per_sample_weights_meta, include_last_offset, padding_idx, out0_meta, out1_meta, out2_meta, out3_meta);
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      at::Tensor indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      
      at::Tensor offsets_;
      if (at::functionalization::impl::isFunctionalTensor(offsets)) {
        at::functionalization::impl::sync(offsets);
        offsets_ = at::functionalization::impl::from_functional_tensor(offsets);
      } else {
        offsets_ = offsets;
      }
      
      ::std::optional<at::Tensor> per_sample_weights_;
      if (at::functionalization::impl::isFunctionalTensor(per_sample_weights)) {
        at::functionalization::impl::sync(per_sample_weights);
        per_sample_weights_ = at::functionalization::impl::from_functional_tensor(per_sample_weights);
      } else {
        per_sample_weights_ = per_sample_weights;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      
      at::Tensor out2_;
      if (at::functionalization::impl::isFunctionalTensor(out2)) {
        at::functionalization::impl::sync(out2);
        out2_ = at::functionalization::impl::from_functional_tensor(out2);
      } else {
        out2_ = out2;
      }
      
      at::Tensor out3_;
      if (at::functionalization::impl::isFunctionalTensor(out3)) {
        at::functionalization::impl::sync(out3);
        out3_ = at::functionalization::impl::from_functional_tensor(out3);
      } else {
        out3_ = out3;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2) && at::functionalization::impl::isFunctionalTensor(out3))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || weight.device().type() == c10::DeviceType::XLA || indices.device().type() == c10::DeviceType::XLA || offsets.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(indices) || at::functionalization::impl::isFunctionalTensor(offsets) || at::functionalization::impl::isFunctionalTensor(per_sample_weights))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::_embedding_bag_out::call(weight_, indices_, offsets_, scale_grad_by_freq, mode, sparse, per_sample_weights_, include_last_offset, padding_idx, out0_, out1_, out2_, out3_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_embedding_bag::call(weight_, indices_, offsets_, scale_grad_by_freq, mode, sparse, per_sample_weights_, include_last_offset, padding_idx);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        auto out2_inner = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(out2);
        at::functionalization::impl::sync(out2);
        auto out2_inner_updated = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::propagate_xla_data_direct(out2_inner, out2_inner_updated);
        auto out3_inner = at::functionalization::impl::from_functional_tensor(out3);
        at::functionalization::impl::replace_(out3, std::get<3>(tmp_output));
        at::functionalization::impl::commit_update(out3);
        at::functionalization::impl::sync(out3);
        auto out3_inner_updated = at::functionalization::impl::from_functional_tensor(out3);
        at::functionalization::impl::propagate_xla_data_direct(out3_inner, out3_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3);
      }
    }

    at::Tensor & _embedding_bag_dense_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const ::std::optional<at::Tensor> & per_sample_weights, int64_t padding_idx, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_meta = to_meta(grad);
        auto indices_meta = to_meta(indices);
        auto offset2bag_meta = to_meta(offset2bag);
        auto bag_size_meta = to_meta(bag_size);
        auto maximum_indices_meta = to_meta(maximum_indices);
        auto per_sample_weights_meta = to_meta(per_sample_weights);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_embedding_bag_dense_backward_out::call(grad_meta, indices_meta, offset2bag_meta, bag_size_meta, maximum_indices_meta, num_weights, scale_grad_by_freq, mode, per_sample_weights_meta, padding_idx, out_meta);
      }
      
      at::Tensor grad_;
      if (at::functionalization::impl::isFunctionalTensor(grad)) {
        at::functionalization::impl::sync(grad);
        grad_ = at::functionalization::impl::from_functional_tensor(grad);
      } else {
        grad_ = grad;
      }
      
      at::Tensor indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      
      at::Tensor offset2bag_;
      if (at::functionalization::impl::isFunctionalTensor(offset2bag)) {
        at::functionalization::impl::sync(offset2bag);
        offset2bag_ = at::functionalization::impl::from_functional_tensor(offset2bag);
      } else {
        offset2bag_ = offset2bag;
      }
      
      at::Tensor bag_size_;
      if (at::functionalization::impl::isFunctionalTensor(bag_size)) {
        at::functionalization::impl::sync(bag_size);
        bag_size_ = at::functionalization::impl::from_functional_tensor(bag_size);
      } else {
        bag_size_ = bag_size;
      }
      
      at::Tensor maximum_indices_;
      if (at::functionalization::impl::isFunctionalTensor(maximum_indices)) {
        at::functionalization::impl::sync(maximum_indices);
        maximum_indices_ = at::functionalization::impl::from_functional_tensor(maximum_indices);
      } else {
        maximum_indices_ = maximum_indices;
      }
      
      ::std::optional<at::Tensor> per_sample_weights_;
      if (at::functionalization::impl::isFunctionalTensor(per_sample_weights)) {
        at::functionalization::impl::sync(per_sample_weights);
        per_sample_weights_ = at::functionalization::impl::from_functional_tensor(per_sample_weights);
      } else {
        per_sample_weights_ = per_sample_weights;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad.device().type() == c10::DeviceType::XLA || indices.device().type() == c10::DeviceType::XLA || offset2bag.device().type() == c10::DeviceType::XLA || bag_size.device().type() == c10::DeviceType::XLA || maximum_indices.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad) || at::functionalization::impl::isFunctionalTensor(indices) || at::functionalization::impl::isFunctionalTensor(offset2bag) || at::functionalization::impl::isFunctionalTensor(bag_size) || at::functionalization::impl::isFunctionalTensor(maximum_indices) || at::functionalization::impl::isFunctionalTensor(per_sample_weights))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_embedding_bag_dense_backward_out::call(grad_, indices_, offset2bag_, bag_size_, maximum_indices_, num_weights, scale_grad_by_freq, mode, per_sample_weights_, padding_idx, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_embedding_bag_dense_backward::call(grad_, indices_, offset2bag_, bag_size_, maximum_indices_, num_weights, scale_grad_by_freq, mode, per_sample_weights_, padding_idx);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _embedding_bag_per_sample_weights_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_meta = to_meta(grad);
        auto weight_meta = to_meta(weight);
        auto indices_meta = to_meta(indices);
        auto offsets_meta = to_meta(offsets);
        auto offset2bag_meta = to_meta(offset2bag);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_embedding_bag_per_sample_weights_backward_out::call(grad_meta, weight_meta, indices_meta, offsets_meta, offset2bag_meta, mode, padding_idx, out_meta);
      }
      
      at::Tensor grad_;
      if (at::functionalization::impl::isFunctionalTensor(grad)) {
        at::functionalization::impl::sync(grad);
        grad_ = at::functionalization::impl::from_functional_tensor(grad);
      } else {
        grad_ = grad;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      at::Tensor indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      
      at::Tensor offsets_;
      if (at::functionalization::impl::isFunctionalTensor(offsets)) {
        at::functionalization::impl::sync(offsets);
        offsets_ = at::functionalization::impl::from_functional_tensor(offsets);
      } else {
        offsets_ = offsets;
      }
      
      at::Tensor offset2bag_;
      if (at::functionalization::impl::isFunctionalTensor(offset2bag)) {
        at::functionalization::impl::sync(offset2bag);
        offset2bag_ = at::functionalization::impl::from_functional_tensor(offset2bag);
      } else {
        offset2bag_ = offset2bag;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA || indices.device().type() == c10::DeviceType::XLA || offsets.device().type() == c10::DeviceType::XLA || offset2bag.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(indices) || at::functionalization::impl::isFunctionalTensor(offsets) || at::functionalization::impl::isFunctionalTensor(offset2bag))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_embedding_bag_per_sample_weights_backward_out::call(grad_, weight_, indices_, offsets_, offset2bag_, mode, padding_idx, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_embedding_bag_per_sample_weights_backward::call(grad_, weight_, indices_, offsets_, offset2bag_, mode, padding_idx);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & empty_out_names_out(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, ::std::optional<at::DimnameList> names, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::empty_names_out::call(size, names, memory_format, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
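        // (This op has no tensor inputs besides `out`, so the check below constant-folds
        // to false and we always take the redispatch path.)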
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::empty_names_out::call(size, names, memory_format, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
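          // Factory-style out= op: the functional variant needs TensorOptions, which are
          // taken from the existing `out` tensor (pin_memory is left unset).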
          tmp_output = at::_ops::empty_names::call(size, names, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt, memory_format);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & empty_out_out(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::empty_out::call(size, memory_format, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::empty_out::call(size, memory_format, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::empty_memory_format::call(size, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt, memory_format);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & empty_permuted_out_out(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::IntArrayRef physical_layout, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::empty_permuted_out::call(size, physical_layout, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::empty_permuted_out::call(size, physical_layout, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::empty_permuted::call(size, physical_layout, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & new_empty_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::new_empty_out::call(self_meta, size, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::new_empty_out::call(self_, size, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::new_empty::call(self_, size, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & new_empty_strided_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::new_empty_strided_out::call(self_meta, size, stride, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::new_empty_strided_out::call(self_, size, stride, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::new_empty_strided::call(self_, size, stride, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & new_full_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::new_full_out::call(self_meta, size, fill_value, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::new_full_out::call(self_, size, fill_value, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::new_full::call(self_, size, fill_value, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & new_zeros_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::new_zeros_out::call(self_meta, size, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::new_zeros_out::call(self_, size, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::new_zeros::call(self_, size, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & new_ones_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::new_ones_out::call(self_meta, size, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::new_ones_out::call(self_, size, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::new_ones::call(self_, size, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _empty_affine_quantized_out_out(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, double scale, int64_t zero_point, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_empty_affine_quantized_out::call(size, scale, zero_point, memory_format, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
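      // This factory takes no tensor inputs besides `out`, so the case-1 check below (functional
      // input mutating a non-functional tensor) can never fire; its condition constant-folds to false.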
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_empty_affine_quantized_out::call(size, scale, zero_point, memory_format, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_empty_affine_quantized::call(size, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt, scale, zero_point, memory_format);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _empty_per_channel_affine_quantized_out_out(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto scales_meta = to_meta(scales);
        auto zero_points_meta = to_meta(zero_points);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_empty_per_channel_affine_quantized_out::call(size, scales_meta, zero_points_meta, axis, memory_format, out_meta);
      }
      
      at::Tensor scales_;
      if (at::functionalization::impl::isFunctionalTensor(scales)) {
        at::functionalization::impl::sync(scales);
        scales_ = at::functionalization::impl::from_functional_tensor(scales);
      } else {
        scales_ = scales;
      }
      
      at::Tensor zero_points_;
      if (at::functionalization::impl::isFunctionalTensor(zero_points)) {
        at::functionalization::impl::sync(zero_points);
        zero_points_ = at::functionalization::impl::from_functional_tensor(zero_points);
      } else {
        zero_points_ = zero_points;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
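      // Unlike the input-free factory above, `scales` and `zero_points` are tensor inputs here, so
      // the error check below does consult them (and is skipped when they live on an XLA device).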
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || scales.device().type() == c10::DeviceType::XLA || zero_points.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(scales) || at::functionalization::impl::isFunctionalTensor(zero_points))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_empty_per_channel_affine_quantized_out::call(size, scales_, zero_points_, axis, memory_format, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_empty_per_channel_affine_quantized::call(size, scales_, zero_points_, axis, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt, memory_format);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    const at::Tensor & resize_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, ::std::optional<at::MemoryFormat> memory_format, const at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::resize_out::call(self_meta, size, memory_format, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::resize_out::call(self_, size, memory_format, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::resize::call(self_, size, memory_format);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    const at::Tensor & resize_(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, ::std::optional<at::MemoryFormat> memory_format) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::resize_::call(self_meta, size, memory_format);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
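      // Under functionalization, resize_ does not resize storage in place: the out-of-place
      // at::resize call below produces a fresh tensor, which is then committed back into `self`'s wrapper.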
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::resize_::call(self_, size, memory_format);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::resize::call(self_, size, memory_format);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    const at::Tensor & _resize_output_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::Device device, const at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_resize_output_out::call(self_meta, size, device, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_resize_output_out::call(self_, size, device, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_resize_output::call(self_, size, device);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    const at::Tensor & _resize_output_(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::Device device) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_resize_output_::call(self_meta, size, device);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_resize_output_::call(self_, size, device);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_resize_output::call(self_, size, device);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & empty_quantized_out_out(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Tensor & qtensor, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto qtensor_meta = to_meta(qtensor);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::empty_quantized_out::call(size, qtensor_meta, memory_format, out_meta);
      }
      
      at::Tensor qtensor_;
      if (at::functionalization::impl::isFunctionalTensor(qtensor)) {
        at::functionalization::impl::sync(qtensor);
        qtensor_ = at::functionalization::impl::from_functional_tensor(qtensor);
      } else {
        qtensor_ = qtensor;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || qtensor.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(qtensor))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::empty_quantized_out::call(size, qtensor_, memory_format, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::empty_quantized::call(size, qtensor_, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt, memory_format);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & empty_like_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::empty_like_out::call(self_meta, memory_format, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::empty_like_out::call(self_, memory_format, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::empty_like::call(self_, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt, memory_format);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & empty_strided_out_out(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::empty_strided_out::call(size, stride, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::empty_strided_out::call(size, stride, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::empty_strided::call(size, stride, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & erf_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::erf_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::erf_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::erf::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & erf_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::erf_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
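      // In-place erf_ is rewritten to its functional form: compute at::erf(self_) out of place,
      // then commit the result into `self`'s wrapper.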
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::erf_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::erf::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
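    // Illustrative only (not generated code): inside a functionalized region
    // (e.g. torch.func.functionalize in Python), something like
    //   x.erf_();
    // no longer mutates x's storage; erf_ above re-points x's FunctionalTensorWrapper at the
    // freshly computed at::erf(x) value.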

    at::Tensor & erfc_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::erfc_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::erfc_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::erfc::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & erfc_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::erfc_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::erfc_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::erfc::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & exp_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::exp_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::exp_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::exp::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & exp_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::exp_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::exp_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::exp::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & exp2_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::exp2_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::exp2_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::exp2::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & exp2_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::exp2_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::exp2_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::exp2::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & expm1_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::expm1_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::expm1_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::expm1::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & expm1_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::expm1_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::expm1_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::expm1::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & eye_out_out(c10::DispatchKeySet dispatchKeySet, c10::SymInt n, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::eye_out::call(n, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
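      // For out= factories like eye, the functional call below lifts dtype, layout and device from
      // the existing `out` tensor and leaves pin_memory unset (::std::nullopt).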
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::eye_out::call(n, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::eye::call(n, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & eye_out_m_out(c10::DispatchKeySet dispatchKeySet, c10::SymInt n, c10::SymInt m, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::eye_m_out::call(n, m, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::eye_m_out::call(n, m, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::eye_m::call(n, m, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & fill_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & value, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::fill_Scalar_out::call(self_meta, value, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::fill_Scalar_out::call(self_, value, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::fill_Scalar::call(self_, value);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & fill__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & value) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::fill__Scalar::call(self_meta, value);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::fill__Scalar::call(self_, value);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::fill_Scalar::call(self_, value);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
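
    // Editor's note (sketch, not generated): for in-place variants such as fill_.Scalar
    // the meta-reference block at the top runs unconditionally (the generated
    // `if (true && !disable_meta_reference())`), re-playing the op on meta tensors so
    // shape/dtype errors surface with the original in-place semantics; for the out=
    // wrappers above the same block is emitted with `if (false && ...)` and is compiled
    // out. When `self` is a functional tensor, the kernel calls the functional
    // fill.Scalar and then replace_/commit_update/sync fold the result back into
    // `self`'s wrapper; propagate_xla_data_direct presumably carries any XLA-side
    // metadata from the old inner tensor over to the updated one.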

    at::Tensor & fill_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & value, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto value_meta = to_meta(value);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::fill_Tensor_out::call(self_meta, value_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor value_;
      if (at::functionalization::impl::isFunctionalTensor(value)) {
        at::functionalization::impl::sync(value);
        value_ = at::functionalization::impl::from_functional_tensor(value);
      } else {
        value_ = value;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || value.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(value))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::fill_Tensor_out::call(self_, value_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::fill_Tensor::call(self_, value_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & fill__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & value) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto value_meta = to_meta(value);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::fill__Tensor::call(self_meta, value_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor value_;
      if (at::functionalization::impl::isFunctionalTensor(value)) {
        at::functionalization::impl::sync(value);
        value_ = at::functionalization::impl::from_functional_tensor(value);
      } else {
        value_ = value;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || value.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(value))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::fill__Tensor::call(self_, value_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::fill_Tensor::call(self_, value_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
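
    // Editor's note (sketch, not generated): when the mutated tensor (`self` here) is
    // NOT functional but some other tensor argument (`value`) is, the kernel asserts
    // (case 1), since mixing functional and non-functional tensors in a mutation is not
    // supported. The XLA device check in the same condition exempts XLA tensors from
    // that assert, per the comment above, because patterns like
    // cpu_tensor.copy_(xla_tensor) are legitimate. Otherwise (case 2) everything is
    // non-functional and the op simply redispatches below the Functionalize key under
    // AutoDispatchSkipFunctionalize.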

    at::Tensor & floor_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::floor_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::floor_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::floor::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & floor_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::floor_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::floor_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::floor::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & floor_divide_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::floor_divide_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::floor_divide_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::floor_divide::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & floor_divide__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::floor_divide__Tensor::call(self_meta, other_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::floor_divide__Tensor::call(self_, other_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::floor_divide::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & floor_divide_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::floor_divide_Scalar_out::call(self_meta, other, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::floor_divide_Scalar_out::call(self_, other, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::floor_divide_Scalar::call(self_, other);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & floor_divide__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::floor_divide__Scalar::call(self_meta, other);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::floor_divide__Scalar::call(self_, other);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::floor_divide_Scalar::call(self_, other);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & frac_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::frac_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::frac_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::frac::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & frac_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::frac_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::frac_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::frac::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & full_out_names_out(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional<at::DimnameList> names, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::full_names_out::call(size, fill_value, names, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::full_names_out::call(size, fill_value, names, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::full_names::call(size, fill_value, names, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & full_out_out(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::full_out::call(size, fill_value, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::full_out::call(size, fill_value, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::full::call(size, fill_value, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & full_like_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & fill_value, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::full_like_out::call(self_meta, fill_value, memory_format, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::full_like_out::call(self_, fill_value, memory_format, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::full_like::call(self_, fill_value, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt, memory_format);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
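
    // Editor's note (sketch, not generated): full_like.out keeps its non-TensorOptions
    // arguments (here `memory_format`) when it is rewritten to the functional full_like
    // call; only dtype/layout/device are re-derived from the unwrapped `out_`, with
    // pin_memory again passed as ::std::nullopt.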

    at::Tensor & from_file_out_out(c10::DispatchKeySet dispatchKeySet, c10::string_view filename, ::std::optional<bool> shared, ::std::optional<int64_t> size, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::from_file_out::call(filename, shared, size, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::from_file_out::call(filename, shared, size, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::from_file::call(filename, shared, size, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & gcd_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::gcd_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::gcd_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::gcd::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & gcd_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::gcd_::call(self_meta, other_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::gcd_::call(self_, other_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::gcd::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & lcm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::lcm_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::lcm_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::lcm::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & lcm_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::lcm_::call(self_meta, other_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::lcm_::call(self_, other_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::lcm::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
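
    // For the in-place variant lcm_ the meta reference run above is actually enabled
    // (its guard is `true && !disable_meta_reference()`), whereas the out= kernels in
    // this file compile it out with `false && ...`; this matches the note above that
    // only in-place ops are currently expected to support meta tensors. The write-back
    // then targets `self` instead of `out`.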

    at::Tensor & grid_sampler_2d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto grid_meta = to_meta(grid);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::grid_sampler_2d_out::call(input_meta, grid_meta, interpolation_mode, padding_mode, align_corners, out_meta);
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      at::Tensor grid_;
      if (at::functionalization::impl::isFunctionalTensor(grid)) {
        at::functionalization::impl::sync(grid);
        grid_ = at::functionalization::impl::from_functional_tensor(grid);
      } else {
        grid_ = grid;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA || grid.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(grid))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::grid_sampler_2d_out::call(input_, grid_, interpolation_mode, padding_mode, align_corners, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::grid_sampler_2d::call(input_, grid_, interpolation_mode, padding_mode, align_corners);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> grid_sampler_2d_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto input_meta = to_meta(input);
        auto grid_meta = to_meta(grid);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::grid_sampler_2d_backward_out::call(grad_output_meta, input_meta, grid_meta, interpolation_mode, padding_mode, align_corners, output_mask, out0_meta, out1_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      at::Tensor grid_;
      if (at::functionalization::impl::isFunctionalTensor(grid)) {
        at::functionalization::impl::sync(grid);
        grid_ = at::functionalization::impl::from_functional_tensor(grid);
      } else {
        grid_ = grid;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || input.device().type() == c10::DeviceType::XLA || grid.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(grid))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::grid_sampler_2d_backward_out::call(grad_output_, input_, grid_, interpolation_mode, padding_mode, align_corners, output_mask, out0_, out1_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::grid_sampler_2d_backward::call(grad_output_, input_, grid_, interpolation_mode, padding_mode, align_corners, output_mask);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
      }
    }
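
    // Multi-output out= kernels like the one above apply the same write-back to each
    // output tensor in turn (std::get<0>, std::get<1>, ...) and return a tuple of
    // references to the original out arguments rather than the temporaries.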

    at::Tensor & _grid_sampler_2d_cpu_fallback_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto grid_meta = to_meta(grid);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_grid_sampler_2d_cpu_fallback_out::call(input_meta, grid_meta, interpolation_mode, padding_mode, align_corners, out_meta);
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      at::Tensor grid_;
      if (at::functionalization::impl::isFunctionalTensor(grid)) {
        at::functionalization::impl::sync(grid);
        grid_ = at::functionalization::impl::from_functional_tensor(grid);
      } else {
        grid_ = grid;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA || grid.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(grid))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_grid_sampler_2d_cpu_fallback_out::call(input_, grid_, interpolation_mode, padding_mode, align_corners, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_grid_sampler_2d_cpu_fallback::call(input_, grid_, interpolation_mode, padding_mode, align_corners);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & grid_sampler_3d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto grid_meta = to_meta(grid);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::grid_sampler_3d_out::call(input_meta, grid_meta, interpolation_mode, padding_mode, align_corners, out_meta);
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      at::Tensor grid_;
      if (at::functionalization::impl::isFunctionalTensor(grid)) {
        at::functionalization::impl::sync(grid);
        grid_ = at::functionalization::impl::from_functional_tensor(grid);
      } else {
        grid_ = grid;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA || grid.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(grid))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::grid_sampler_3d_out::call(input_, grid_, interpolation_mode, padding_mode, align_corners, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::grid_sampler_3d::call(input_, grid_, interpolation_mode, padding_mode, align_corners);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> grid_sampler_3d_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto input_meta = to_meta(input);
        auto grid_meta = to_meta(grid);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::grid_sampler_3d_backward_out::call(grad_output_meta, input_meta, grid_meta, interpolation_mode, padding_mode, align_corners, output_mask, out0_meta, out1_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      at::Tensor grid_;
      if (at::functionalization::impl::isFunctionalTensor(grid)) {
        at::functionalization::impl::sync(grid);
        grid_ = at::functionalization::impl::from_functional_tensor(grid);
      } else {
        grid_ = grid;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || input.device().type() == c10::DeviceType::XLA || grid.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(grid))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::grid_sampler_3d_backward_out::call(grad_output_, input_, grid_, interpolation_mode, padding_mode, align_corners, output_mask, out0_, out1_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::grid_sampler_3d_backward::call(grad_output_, input_, grid_, interpolation_mode, padding_mode, align_corners, output_mask);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
      }
    }

    at::Tensor & hann_window_out_out(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::hann_window_out::call(window_length, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::hann_window_out::call(window_length, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::hann_window::call(window_length, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
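
    // hann_window and the other window factories below take no tensor inputs, so when
    // the functional variant is called its TensorOptions are reconstructed from the
    // unwrapped out tensor: out_.scalar_type(), out_.layout(), out_.device(), with
    // pin_memory left as ::std::nullopt. The XLA / functional-input checks degenerate
    // to `!(false) && (false)` because there are no non-out tensor arguments to inspect.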

    at::Tensor & hann_window_out_periodic_out(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::hann_window_periodic_out::call(window_length, periodic, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::hann_window_periodic_out::call(window_length, periodic, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::hann_window_periodic::call(window_length, periodic, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & hamming_window_out_out(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::hamming_window_out::call(window_length, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::hamming_window_out::call(window_length, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::hamming_window::call(window_length, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & hamming_window_out_periodic_out(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::hamming_window_periodic_out::call(window_length, periodic, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::hamming_window_periodic_out::call(window_length, periodic, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::hamming_window_periodic::call(window_length, periodic, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & hamming_window_out_periodic_alpha_out(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double alpha, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::hamming_window_periodic_alpha_out::call(window_length, periodic, alpha, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::hamming_window_periodic_alpha_out::call(window_length, periodic, alpha, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::hamming_window_periodic_alpha::call(window_length, periodic, alpha, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & hamming_window_out_periodic_alpha_beta_out(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double alpha, double beta, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::hamming_window_periodic_alpha_beta_out::call(window_length, periodic, alpha, beta, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::hamming_window_periodic_alpha_beta_out::call(window_length, periodic, alpha, beta, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::hamming_window_periodic_alpha_beta::call(window_length, periodic, alpha, beta, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & kaiser_window_out_out(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::kaiser_window_out::call(window_length, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::kaiser_window_out::call(window_length, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::kaiser_window::call(window_length, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & kaiser_window_out_periodic_out(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::kaiser_window_periodic_out::call(window_length, periodic, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::kaiser_window_periodic_out::call(window_length, periodic, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::kaiser_window_periodic::call(window_length, periodic, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & kaiser_window_out_beta_out(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double beta, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::kaiser_window_beta_out::call(window_length, periodic, beta, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::kaiser_window_beta_out::call(window_length, periodic, beta, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::kaiser_window_beta::call(window_length, periodic, beta, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto weight_meta = to_meta(weight);
        auto bias_meta = to_meta(bias);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        auto out2_meta = to_meta(out2);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::native_group_norm_out::call(input_meta, weight_meta, bias_meta, N, C, HxW, group, eps, out0_meta, out1_meta, out2_meta);
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      ::std::optional<at::Tensor> weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      ::std::optional<at::Tensor> bias_;
      if (at::functionalization::impl::isFunctionalTensor(bias)) {
        at::functionalization::impl::sync(bias);
        bias_ = at::functionalization::impl::from_functional_tensor(bias);
      } else {
        bias_ = bias;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      
      at::Tensor out2_;
      if (at::functionalization::impl::isFunctionalTensor(out2)) {
        at::functionalization::impl::sync(out2);
        out2_ = at::functionalization::impl::from_functional_tensor(out2);
      } else {
        out2_ = out2;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::native_group_norm_out::call(input_, weight_, bias_, N, C, HxW, group, eps, out0_, out1_, out2_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::native_group_norm::call(input_, weight_, bias_, N, C, HxW, group, eps);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        auto out2_inner = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(out2);
        at::functionalization::impl::sync(out2);
        auto out2_inner_updated = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::propagate_xla_data_direct(out2_inner, out2_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
      }
    }
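
    // native_group_norm.out carries optional tensor arguments: `weight` and `bias` are
    // unwrapped into ::std::optional<at::Tensor> locals with the same
    // isFunctionalTensor/from_functional_tensor pattern. Note that the XLA device check
    // above only inspects `input`, while the functional-tensor check also covers the
    // optional weight and bias.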

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const ::std::optional<at::Tensor> & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_out_meta = to_meta(grad_out);
        auto input_meta = to_meta(input);
        auto mean_meta = to_meta(mean);
        auto rstd_meta = to_meta(rstd);
        auto weight_meta = to_meta(weight);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        auto out2_meta = to_meta(out2);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::native_group_norm_backward_out::call(grad_out_meta, input_meta, mean_meta, rstd_meta, weight_meta, N, C, HxW, group, output_mask, out0_meta, out1_meta, out2_meta);
      }
      
      at::Tensor grad_out_;
      if (at::functionalization::impl::isFunctionalTensor(grad_out)) {
        at::functionalization::impl::sync(grad_out);
        grad_out_ = at::functionalization::impl::from_functional_tensor(grad_out);
      } else {
        grad_out_ = grad_out;
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      at::Tensor mean_;
      if (at::functionalization::impl::isFunctionalTensor(mean)) {
        at::functionalization::impl::sync(mean);
        mean_ = at::functionalization::impl::from_functional_tensor(mean);
      } else {
        mean_ = mean;
      }
      
      at::Tensor rstd_;
      if (at::functionalization::impl::isFunctionalTensor(rstd)) {
        at::functionalization::impl::sync(rstd);
        rstd_ = at::functionalization::impl::from_functional_tensor(rstd);
      } else {
        rstd_ = rstd;
      }
      
      ::std::optional<at::Tensor> weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      
      at::Tensor out2_;
      if (at::functionalization::impl::isFunctionalTensor(out2)) {
        at::functionalization::impl::sync(out2);
        out2_ = at::functionalization::impl::from_functional_tensor(out2);
      } else {
        out2_ = out2;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_out.device().type() == c10::DeviceType::XLA || input.device().type() == c10::DeviceType::XLA || mean.device().type() == c10::DeviceType::XLA || rstd.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_out) || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(mean) || at::functionalization::impl::isFunctionalTensor(rstd) || at::functionalization::impl::isFunctionalTensor(weight))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::native_group_norm_backward_out::call(grad_out_, input_, mean_, rstd_, weight_, N, C, HxW, group, output_mask, out0_, out1_, out2_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::native_group_norm_backward::call(grad_out_, input_, mean_, rstd_, weight_, N, C, HxW, group, output_mask);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        auto out2_inner = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(out2);
        at::functionalization::impl::sync(out2);
        auto out2_inner_updated = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::propagate_xla_data_direct(out2_inner, out2_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
      }
    }
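
    // Note on structure: every functionalization kernel in this section follows the same
    // generated skeleton, visible in native_group_norm_backward_out_out above:
    //   1. optionally replay the op on meta tensors first, to surface shape errors early;
    //   2. sync() and unwrap every functional-tensor argument with from_functional_tensor();
    //   3. if the mutated output(s) are not functional tensors, either raise the "case 1" assert
    //      (a functional input would be mutating a non-functional output) or, in "case 2", skip
    //      functionalization entirely and redispatch to the original mutable op;
    //   4. otherwise redispatch to the functional (non-out) variant and commit the result back
    //      into each output wrapper via replace_() / commit_update() / sync(), then call
    //      propagate_xla_data_direct() on the old and new inner tensors.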

    at::Tensor & _fft_r2c_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_fft_r2c_out::call(self_meta, dim, normalization, onesided, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_fft_r2c_out::call(self_, dim, normalization, onesided, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_fft_r2c::call(self_, dim, normalization, onesided);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
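
    // Illustrative sketch (not generated code): how a kernel like _fft_r2c_out_out above is
    // typically reached. It assumes the to_functional_tensor() helper declared in
    // ATen/FunctionalTensorWrapper.h and that the Functionalize dispatch key is active.
    //
    //   at::Tensor self = at::randn({8});
    //   at::Tensor out = at::empty({0}, at::kComplexFloat);
    //   at::Tensor self_f = at::functionalization::impl::to_functional_tensor(self);
    //   at::Tensor out_f = at::functionalization::impl::to_functional_tensor(out);
    //   // _fft_r2c.out called on (self_f, ..., out_f) lands in _fft_r2c_out_out, which unwraps
    //   // both wrappers, redispatches to the functional at::_ops::_fft_r2c, and then commits
    //   // the result into out_f with replace_() / commit_update().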

    at::Tensor & _fft_c2r_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, c10::SymInt last_dim_size, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_fft_c2r_out::call(self_meta, dim, normalization, last_dim_size, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_fft_c2r_out::call(self_, dim, normalization, last_dim_size, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_fft_c2r::call(self_, dim, normalization, last_dim_size);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _fft_c2c_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_fft_c2c_out::call(self_meta, dim, normalization, forward, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_fft_c2c_out::call(self_, dim, normalization, forward, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_fft_c2c::call(self_, dim, normalization, forward);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & index_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto indices_meta = to_meta(indices);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::index_Tensor_out::call(self_meta, indices_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      c10::List<::std::optional<at::Tensor>> indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(indices))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::index_Tensor_out::call(self_, indices_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::index_Tensor::call(self_, indices_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
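
    // Note: list-of-optional-Tensor arguments (e.g. `indices` above) are handled through the
    // c10::List overloads of isFunctionalTensor(), sync(), and from_functional_tensor(), so the
    // whole list is synced and unwrapped in a single call before the redispatch, mirroring the
    // handling of plain Tensor arguments.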

    at::Tensor & index_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto index_meta = to_meta(index);
        auto source_meta = to_meta(source);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::index_copy_out::call(self_meta, dim, index_meta, source_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor index_;
      if (at::functionalization::impl::isFunctionalTensor(index)) {
        at::functionalization::impl::sync(index);
        index_ = at::functionalization::impl::from_functional_tensor(index);
      } else {
        index_ = index;
      }
      
      at::Tensor source_;
      if (at::functionalization::impl::isFunctionalTensor(source)) {
        at::functionalization::impl::sync(source);
        source_ = at::functionalization::impl::from_functional_tensor(source);
      } else {
        source_ = source;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || index.device().type() == c10::DeviceType::XLA || source.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(index) || at::functionalization::impl::isFunctionalTensor(source))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::index_copy_out::call(self_, dim, index_, source_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::index_copy::call(self_, dim, index_, source_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & index_copy_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto index_meta = to_meta(index);
        auto source_meta = to_meta(source);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::index_copy_::call(self_meta, dim, index_meta, source_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor index_;
      if (at::functionalization::impl::isFunctionalTensor(index)) {
        at::functionalization::impl::sync(index);
        index_ = at::functionalization::impl::from_functional_tensor(index);
      } else {
        index_ = index;
      }
      
      at::Tensor source_;
      if (at::functionalization::impl::isFunctionalTensor(source)) {
        at::functionalization::impl::sync(source);
        source_ = at::functionalization::impl::from_functional_tensor(source);
      } else {
        source_ = source;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || index.device().type() == c10::DeviceType::XLA || source.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(index) || at::functionalization::impl::isFunctionalTensor(source))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::index_copy_::call(self_, dim, index_, source_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::index_copy::call(self_, dim, index_, source_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
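
    // Note: in-place kernels such as index_copy_ above differ from the out= kernels in two ways:
    // the meta-tensor reference run is enabled (`true && !disable_meta_reference()` instead of
    // `false && ...`), and the tensor that receives the replace_() / commit_update() write-back
    // is the mutated `self` argument itself rather than a separate `out` argument.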

    at::Tensor & index_put_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto indices_meta = to_meta(indices);
        auto values_meta = to_meta(values);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::index_put_out::call(self_meta, indices_meta, values_meta, accumulate, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      c10::List<::std::optional<at::Tensor>> indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      
      at::Tensor values_;
      if (at::functionalization::impl::isFunctionalTensor(values)) {
        at::functionalization::impl::sync(values);
        values_ = at::functionalization::impl::from_functional_tensor(values);
      } else {
        values_ = values;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || values.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(indices) || at::functionalization::impl::isFunctionalTensor(values))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::index_put_out::call(self_, indices_, values_, accumulate, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::index_put::call(self_, indices_, values_, accumulate);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & index_put_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto indices_meta = to_meta(indices);
        auto values_meta = to_meta(values);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::index_put_::call(self_meta, indices_meta, values_meta, accumulate);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      c10::List<::std::optional<at::Tensor>> indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      
      at::Tensor values_;
      if (at::functionalization::impl::isFunctionalTensor(values)) {
        at::functionalization::impl::sync(values);
        values_ = at::functionalization::impl::from_functional_tensor(values);
      } else {
        values_ = values;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || values.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(indices) || at::functionalization::impl::isFunctionalTensor(values))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::index_put_::call(self_, indices_, values_, accumulate);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::index_put::call(self_, indices_, values_, accumulate);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & _index_put_impl_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto indices_meta = to_meta(indices);
        auto values_meta = to_meta(values);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_index_put_impl_out::call(self_meta, indices_meta, values_meta, accumulate, unsafe, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      c10::List<::std::optional<at::Tensor>> indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      
      at::Tensor values_;
      if (at::functionalization::impl::isFunctionalTensor(values)) {
        at::functionalization::impl::sync(values);
        values_ = at::functionalization::impl::from_functional_tensor(values);
      } else {
        values_ = values;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || values.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(indices) || at::functionalization::impl::isFunctionalTensor(values))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_index_put_impl_out::call(self_, indices_, values_, accumulate, unsafe, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_index_put_impl::call(self_, indices_, values_, accumulate, unsafe);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _index_put_impl_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto indices_meta = to_meta(indices);
        auto values_meta = to_meta(values);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_index_put_impl_::call(self_meta, indices_meta, values_meta, accumulate, unsafe);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      c10::List<::std::optional<at::Tensor>> indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      
      at::Tensor values_;
      if (at::functionalization::impl::isFunctionalTensor(values)) {
        at::functionalization::impl::sync(values);
        values_ = at::functionalization::impl::from_functional_tensor(values);
      } else {
        values_ = values;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || values.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(indices) || at::functionalization::impl::isFunctionalTensor(values))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_index_put_impl_::call(self_, indices_, values_, accumulate, unsafe);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_index_put_impl::call(self_, indices_, values_, accumulate, unsafe);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & isin_out_Tensor_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto elements_meta = to_meta(elements);
        auto test_elements_meta = to_meta(test_elements);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::isin_Tensor_Tensor_out::call(elements_meta, test_elements_meta, assume_unique, invert, out_meta);
      }
      
      at::Tensor elements_;
      if (at::functionalization::impl::isFunctionalTensor(elements)) {
        at::functionalization::impl::sync(elements);
        elements_ = at::functionalization::impl::from_functional_tensor(elements);
      } else {
        elements_ = elements;
      }
      
      at::Tensor test_elements_;
      if (at::functionalization::impl::isFunctionalTensor(test_elements)) {
        at::functionalization::impl::sync(test_elements);
        test_elements_ = at::functionalization::impl::from_functional_tensor(test_elements);
      } else {
        test_elements_ = test_elements;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || elements.device().type() == c10::DeviceType::XLA || test_elements.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(elements) || at::functionalization::impl::isFunctionalTensor(test_elements))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::isin_Tensor_Tensor_out::call(elements_, test_elements_, assume_unique, invert, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::isin_Tensor_Tensor::call(elements_, test_elements_, assume_unique, invert);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & isin_out_Tensor_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto elements_meta = to_meta(elements);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::isin_Tensor_Scalar_out::call(elements_meta, test_element, assume_unique, invert, out_meta);
      }
      
      at::Tensor elements_;
      if (at::functionalization::impl::isFunctionalTensor(elements)) {
        at::functionalization::impl::sync(elements);
        elements_ = at::functionalization::impl::from_functional_tensor(elements);
      } else {
        elements_ = elements;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || elements.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(elements))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::isin_Tensor_Scalar_out::call(elements_, test_element, assume_unique, invert, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::isin_Tensor_Scalar::call(elements_, test_element, assume_unique, invert);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & isin_out_Scalar_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto test_elements_meta = to_meta(test_elements);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::isin_Scalar_Tensor_out::call(element, test_elements_meta, assume_unique, invert, out_meta);
      }
      
      at::Tensor test_elements_;
      if (at::functionalization::impl::isFunctionalTensor(test_elements)) {
        at::functionalization::impl::sync(test_elements);
        test_elements_ = at::functionalization::impl::from_functional_tensor(test_elements);
      } else {
        test_elements_ = test_elements;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || test_elements.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(test_elements))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::isin_Scalar_Tensor_out::call(element, test_elements_, assume_unique, invert, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::isin_Scalar_Tensor::call(element, test_elements_, assume_unique, invert);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
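
    // Note: the three isin out= kernels above differ only in which operands are Tensors.
    // Scalar operands (test_element / element) are forwarded to the redispatch unchanged; only
    // Tensor arguments are checked with isFunctionalTensor(), synced, and unwrapped.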

    at::Tensor & isnan_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::isnan_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::isnan_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::isnan::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & kron_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::kron_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::kron_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::kron::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_out_values(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t k, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto values_meta = to_meta(values);
        auto indices_meta = to_meta(indices);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::kthvalue_values::call(self_meta, k, dim, keepdim, values_meta, indices_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor values_;
      if (at::functionalization::impl::isFunctionalTensor(values)) {
        at::functionalization::impl::sync(values);
        values_ = at::functionalization::impl::from_functional_tensor(values);
      } else {
        values_ = values;
      }
      
      at::Tensor indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(values) && at::functionalization::impl::isFunctionalTensor(indices))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::kthvalue_values::call(self_, k, dim, keepdim, values_, indices_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::kthvalue::call(self_, k, dim, keepdim);
        }
        auto values_inner = at::functionalization::impl::from_functional_tensor(values);
        at::functionalization::impl::replace_(values, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(values);
        at::functionalization::impl::sync(values);
        auto values_inner_updated = at::functionalization::impl::from_functional_tensor(values);
        at::functionalization::impl::propagate_xla_data_direct(values_inner, values_inner_updated);
        auto indices_inner = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::replace_(indices, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(indices);
        at::functionalization::impl::sync(indices);
        auto indices_inner_updated = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::propagate_xla_data_direct(indices_inner, indices_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
      }
    }
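    // Multi-output out= ops such as kthvalue.values follow the same pattern per
    // output: the functional variant returns a tuple, and each element is written
    // back into its corresponding out argument (std::get<0>(tmp_output) -> values,
    // std::get<1>(tmp_output) -> indices) via replace_/commit_update/sync before
    // the references are returned.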

    ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_out_dimname_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto values_meta = to_meta(values);
        auto indices_meta = to_meta(indices);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::kthvalue_dimname_out::call(self_meta, k, dim, keepdim, values_meta, indices_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor values_;
      if (at::functionalization::impl::isFunctionalTensor(values)) {
        at::functionalization::impl::sync(values);
        values_ = at::functionalization::impl::from_functional_tensor(values);
      } else {
        values_ = values;
      }
      
      at::Tensor indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(values) && at::functionalization::impl::isFunctionalTensor(indices))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::kthvalue_dimname_out::call(self_, k, dim, keepdim, values_, indices_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::kthvalue_dimname::call(self_, k, dim, keepdim);
        }
        auto values_inner = at::functionalization::impl::from_functional_tensor(values);
        at::functionalization::impl::replace_(values, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(values);
        at::functionalization::impl::sync(values);
        auto values_inner_updated = at::functionalization::impl::from_functional_tensor(values);
        at::functionalization::impl::propagate_xla_data_direct(values_inner, values_inner_updated);
        auto indices_inner = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::replace_(indices, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(indices);
        at::functionalization::impl::sync(indices);
        auto indices_inner_updated = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::propagate_xla_data_direct(indices_inner, indices_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto weight_meta = to_meta(weight);
        auto bias_meta = to_meta(bias);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        auto out2_meta = to_meta(out2);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::native_layer_norm_out::call(input_meta, normalized_shape, weight_meta, bias_meta, eps, out0_meta, out1_meta, out2_meta);
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      ::std::optional<at::Tensor> weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      ::std::optional<at::Tensor> bias_;
      if (at::functionalization::impl::isFunctionalTensor(bias)) {
        at::functionalization::impl::sync(bias);
        bias_ = at::functionalization::impl::from_functional_tensor(bias);
      } else {
        bias_ = bias;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      
      at::Tensor out2_;
      if (at::functionalization::impl::isFunctionalTensor(out2)) {
        at::functionalization::impl::sync(out2);
        out2_ = at::functionalization::impl::from_functional_tensor(out2);
      } else {
        out2_ = out2;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::native_layer_norm_out::call(input_, normalized_shape, weight_, bias_, eps, out0_, out1_, out2_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::native_layer_norm::call(input_, normalized_shape, weight_, bias_, eps);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        auto out2_inner = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(out2);
        at::functionalization::impl::sync(out2);
        auto out2_inner_updated = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::propagate_xla_data_direct(out2_inner, out2_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
      }
    }
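    // Optional tensor arguments (weight, bias) go through the
    // ::std::optional<at::Tensor> overloads of isFunctionalTensor / sync /
    // from_functional_tensor used above, so a std::nullopt simply passes through
    // unchanged; the "are all outputs functional?" decision is still driven only
    // by out0/out1/out2.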

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_out_meta = to_meta(grad_out);
        auto input_meta = to_meta(input);
        auto mean_meta = to_meta(mean);
        auto rstd_meta = to_meta(rstd);
        auto weight_meta = to_meta(weight);
        auto bias_meta = to_meta(bias);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        auto out2_meta = to_meta(out2);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::native_layer_norm_backward_out::call(grad_out_meta, input_meta, normalized_shape, mean_meta, rstd_meta, weight_meta, bias_meta, output_mask, out0_meta, out1_meta, out2_meta);
      }
      
      at::Tensor grad_out_;
      if (at::functionalization::impl::isFunctionalTensor(grad_out)) {
        at::functionalization::impl::sync(grad_out);
        grad_out_ = at::functionalization::impl::from_functional_tensor(grad_out);
      } else {
        grad_out_ = grad_out;
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      at::Tensor mean_;
      if (at::functionalization::impl::isFunctionalTensor(mean)) {
        at::functionalization::impl::sync(mean);
        mean_ = at::functionalization::impl::from_functional_tensor(mean);
      } else {
        mean_ = mean;
      }
      
      at::Tensor rstd_;
      if (at::functionalization::impl::isFunctionalTensor(rstd)) {
        at::functionalization::impl::sync(rstd);
        rstd_ = at::functionalization::impl::from_functional_tensor(rstd);
      } else {
        rstd_ = rstd;
      }
      
      ::std::optional<at::Tensor> weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      ::std::optional<at::Tensor> bias_;
      if (at::functionalization::impl::isFunctionalTensor(bias)) {
        at::functionalization::impl::sync(bias);
        bias_ = at::functionalization::impl::from_functional_tensor(bias);
      } else {
        bias_ = bias;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      
      at::Tensor out2_;
      if (at::functionalization::impl::isFunctionalTensor(out2)) {
        at::functionalization::impl::sync(out2);
        out2_ = at::functionalization::impl::from_functional_tensor(out2);
      } else {
        out2_ = out2;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_out.device().type() == c10::DeviceType::XLA || input.device().type() == c10::DeviceType::XLA || mean.device().type() == c10::DeviceType::XLA || rstd.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_out) || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(mean) || at::functionalization::impl::isFunctionalTensor(rstd) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::native_layer_norm_backward_out::call(grad_out_, input_, normalized_shape, mean_, rstd_, weight_, bias_, output_mask, out0_, out1_, out2_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::native_layer_norm_backward::call(grad_out_, input_, normalized_shape, mean_, rstd_, weight_, bias_, output_mask);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        auto out2_inner = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(out2);
        at::functionalization::impl::sync(out2);
        auto out2_inner_updated = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::propagate_xla_data_direct(out2_inner, out2_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
      }
    }

    at::Tensor & nan_to_num_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<double> nan, ::std::optional<double> posinf, ::std::optional<double> neginf, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::nan_to_num_out::call(self_meta, nan, posinf, neginf, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::nan_to_num_out::call(self_, nan, posinf, neginf, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::nan_to_num::call(self_, nan, posinf, neginf);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
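    // Non-tensor arguments (here the ::std::optional<double> nan/posinf/neginf
    // scalars) are forwarded as-is; only Tensor and optional-Tensor arguments
    // participate in the sync/unwrap and write-back steps.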

    at::Tensor & nan_to_num_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, ::std::optional<double> nan, ::std::optional<double> posinf, ::std::optional<double> neginf) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::nan_to_num_::call(self_meta, nan, posinf, neginf);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::nan_to_num_::call(self_, nan, posinf, neginf);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::nan_to_num::call(self_, nan, posinf, neginf);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
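    // In-place variants such as nan_to_num_ differ from the out= kernels in two
    // ways: the meta reference run above is enabled (`true && !disable_meta_reference()`
    // instead of `false && ...`), since in-place ops can all be run on meta tensors
    // to surface shape errors early, and the mutated argument is `self` itself, so
    // the write-back (replace_/commit_update/sync) targets `self` and the kernel
    // returns it.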

    at::Tensor & linear_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto weight_meta = to_meta(weight);
        auto bias_meta = to_meta(bias);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linear_out::call(input_meta, weight_meta, bias_meta, out_meta);
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      ::std::optional<at::Tensor> bias_;
      if (at::functionalization::impl::isFunctionalTensor(bias)) {
        at::functionalization::impl::sync(bias);
        bias_ = at::functionalization::impl::from_functional_tensor(bias);
      } else {
        bias_ = bias;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::linear_out::call(input_, weight_, bias_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linear::call(input_, weight_, bias_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linear_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto grad_output_meta = to_meta(grad_output);
        auto weight_meta = to_meta(weight);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        auto out2_meta = to_meta(out2);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linear_backward_out::call(self_meta, grad_output_meta, weight_meta, output_mask, out0_meta, out1_meta, out2_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      
      at::Tensor out2_;
      if (at::functionalization::impl::isFunctionalTensor(out2)) {
        at::functionalization::impl::sync(out2);
        out2_ = at::functionalization::impl::from_functional_tensor(out2);
      } else {
        out2_ = out2;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || grad_output.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(weight))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::linear_backward_out::call(self_, grad_output_, weight_, output_mask, out0_, out1_, out2_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linear_backward::call(self_, grad_output_, weight_, output_mask);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        auto out2_inner = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(out2);
        at::functionalization::impl::sync(out2);
        auto out2_inner_updated = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::propagate_xla_data_direct(out2_inner, out2_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
      }
    }

    at::Tensor & mkldnn_linear_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto weight_meta = to_meta(weight);
        auto bias_meta = to_meta(bias);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::mkldnn_linear_out::call(self_meta, weight_meta, bias_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      ::std::optional<at::Tensor> bias_;
      if (at::functionalization::impl::isFunctionalTensor(bias)) {
        at::functionalization::impl::sync(bias);
        bias_ = at::functionalization::impl::from_functional_tensor(bias);
      } else {
        bias_ = bias;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::mkldnn_linear_out::call(self_, weight_, bias_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::mkldnn_linear::call(self_, weight_, bias_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & mkldnn_linear_backward_input_out_out(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef input_size, const at::Tensor & grad_output, const at::Tensor & weight, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto weight_meta = to_meta(weight);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::mkldnn_linear_backward_input_out::call(input_size, grad_output_meta, weight_meta, out_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(weight))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::mkldnn_linear_backward_input_out::call(input_size, grad_output_, weight_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::mkldnn_linear_backward_input::call(input_size, grad_output_, weight_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> mkldnn_linear_backward_weights_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined, at::Tensor & out0, at::Tensor & out1) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto input_meta = to_meta(input);
        auto weight_meta = to_meta(weight);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::mkldnn_linear_backward_weights_out::call(grad_output_meta, input_meta, weight_meta, bias_defined, out0_meta, out1_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || input.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::mkldnn_linear_backward_weights_out::call(grad_output_, input_, weight_, bias_defined, out0_, out1_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::mkldnn_linear_backward_weights::call(grad_output_, input_, weight_, bias_defined);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> mkldnn_linear_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto grad_output_meta = to_meta(grad_output);
        auto weight_meta = to_meta(weight);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        auto out2_meta = to_meta(out2);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::mkldnn_linear_backward_out::call(self_meta, grad_output_meta, weight_meta, output_mask, out0_meta, out1_meta, out2_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      
      at::Tensor out2_;
      if (at::functionalization::impl::isFunctionalTensor(out2)) {
        at::functionalization::impl::sync(out2);
        out2_ = at::functionalization::impl::from_functional_tensor(out2);
      } else {
        out2_ = out2;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || grad_output.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(weight))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::mkldnn_linear_backward_out::call(self_, grad_output_, weight_, output_mask, out0_, out1_, out2_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::mkldnn_linear_backward::call(self_, grad_output_, weight_, output_mask);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        auto out2_inner = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(out2);
        at::functionalization::impl::sync(out2);
        auto out2_inner_updated = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::propagate_xla_data_direct(out2_inner, out2_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
      }
    }

    at::Tensor & ldexp_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::ldexp_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::ldexp_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::ldexp_Tensor::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & ldexp_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::ldexp_::call(self_meta, other_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::ldexp_::call(self_, other_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::ldexp_Tensor::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
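
    // The leading boolean literal in the meta-reference guard is emitted per overload: the
    // in-place kernels in this section (ldexp_, log_, log10_, ...) use
    // `true && !disable_meta_reference()` and replay the op on meta tensors first to surface
    // shape and broadcasting errors, while the out= kernels below (linspace_out_out,
    // log_out_out, ...) are emitted with `false && ...` and skip the pre-check, as the comment
    // inside each guard notes.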

    at::Tensor & linspace_out_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, int64_t steps, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linspace_out::call(start, end, steps, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::linspace_out::call(start, end, steps, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linspace::call(start, end, steps, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
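
    // The linspace (and, further down, logspace) functional variants are factory ops, so when
    // they are called here the dtype/layout/device are recovered from the existing out tensor
    // and pin_memory is left unset:
    //
    //   tmp_output = at::_ops::linspace::call(start, end, steps,
    //                                         out_.scalar_type(), out_.layout(), out_.device(),
    //                                         ::std::nullopt /* pin_memory */);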

    at::Tensor & linspace_out_Tensor_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Tensor & end, int64_t steps, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto start_meta = to_meta(start);
        auto end_meta = to_meta(end);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linspace_Tensor_Tensor_out::call(start_meta, end_meta, steps, out_meta);
      }
      
      at::Tensor start_;
      if (at::functionalization::impl::isFunctionalTensor(start)) {
        at::functionalization::impl::sync(start);
        start_ = at::functionalization::impl::from_functional_tensor(start);
      } else {
        start_ = start;
      }
      
      at::Tensor end_;
      if (at::functionalization::impl::isFunctionalTensor(end)) {
        at::functionalization::impl::sync(end);
        end_ = at::functionalization::impl::from_functional_tensor(end);
      } else {
        end_ = end;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || start.device().type() == c10::DeviceType::XLA || end.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(start) || at::functionalization::impl::isFunctionalTensor(end))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::linspace_Tensor_Tensor_out::call(start_, end_, steps, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linspace_Tensor_Tensor::call(start_, end_, steps, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & linspace_out_Tensor_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Scalar & end, int64_t steps, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto start_meta = to_meta(start);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linspace_Tensor_Scalar_out::call(start_meta, end, steps, out_meta);
      }
      
      at::Tensor start_;
      if (at::functionalization::impl::isFunctionalTensor(start)) {
        at::functionalization::impl::sync(start);
        start_ = at::functionalization::impl::from_functional_tensor(start);
      } else {
        start_ = start;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || start.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(start))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::linspace_Tensor_Scalar_out::call(start_, end, steps, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linspace_Tensor_Scalar::call(start_, end, steps, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & linspace_out_Scalar_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Tensor & end, int64_t steps, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto end_meta = to_meta(end);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linspace_Scalar_Tensor_out::call(start, end_meta, steps, out_meta);
      }
      
      at::Tensor end_;
      if (at::functionalization::impl::isFunctionalTensor(end)) {
        at::functionalization::impl::sync(end);
        end_ = at::functionalization::impl::from_functional_tensor(end);
      } else {
        end_ = end;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || end.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(end))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::linspace_Scalar_Tensor_out::call(start, end_, steps, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linspace_Scalar_Tensor::call(start, end_, steps, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & log_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::log_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::log_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::log::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
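
    // The isFunctionalTensor(out) branch above is the mixed-input safety check shared by these
    // kernels: mutating a plain (non-functional) out tensor from functional inputs is an error
    // (case 1), with an exception when an input lives on an XLA device, since code like
    // cpu_tensor.copy_(xla_tensor) is valid. When none of the arguments are functional (case 2),
    // the kernel simply re-dispatches to the regular out= op below the Functionalize key and
    // returns `out` untouched at this layer.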

    at::Tensor & log_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::log_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::log_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::log::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & log10_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::log10_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::log10_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::log10::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & log10_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::log10_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::log10_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::log10::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & log1p_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::log1p_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::log1p_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::log1p::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & log1p_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::log1p_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::log1p_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::log1p::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & log2_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::log2_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::log2_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::log2::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & log2_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::log2_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::log2_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::log2::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & logaddexp_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::logaddexp_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::logaddexp_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::logaddexp::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & logaddexp2_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::logaddexp2_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::logaddexp2_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::logaddexp2::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & xlogy_out_OutTensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::xlogy_OutTensor::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::xlogy_OutTensor::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::xlogy_Tensor::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & xlogy__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::xlogy__Tensor::call(self_meta, other_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::xlogy__Tensor::call(self_, other_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::xlogy_Tensor::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & xlogy_out_OutScalar_Self(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::xlogy_OutScalar_Self::call(self, other_meta, out_meta);
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::xlogy_OutScalar_Self::call(self, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::xlogy_Scalar_Self::call(self, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & xlogy_out_OutScalar_Other(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::xlogy_OutScalar_Other::call(self_meta, other, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::xlogy_OutScalar_Other::call(self_, other, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::xlogy_Scalar_Other::call(self_, other);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & xlogy__Scalar_Other(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::xlogy__Scalar_Other::call(self_meta, other);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::xlogy__Scalar_Other::call(self_, other);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::xlogy_Scalar_Other::call(self_, other);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
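
    // For the mixed Scalar/Tensor xlogy overloads only the Tensor arguments are unwrapped; the
    // Scalar is forwarded as-is, and each mutable overload pairs with its matching functional
    // overload (xlogy_OutScalar_Self -> xlogy_Scalar_Self, xlogy_OutScalar_Other and
    // xlogy__Scalar_Other -> xlogy_Scalar_Other).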

    at::Tensor & logspace_out_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::logspace_out::call(start, end, steps, base, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::logspace_out::call(start, end, steps, base, out_);
         return out;
        }
      } else {
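        // Factory-style out= op: logspace has no functional tensor inputs to thread through,
        // so the functional variant is invoked with TensorOptions recovered from the out
        // tensor (scalar_type/layout/device), leaving pin_memory unset.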
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::logspace::call(start, end, steps, base, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & logspace_out_Tensor_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Tensor & end, int64_t steps, double base, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto start_meta = to_meta(start);
        auto end_meta = to_meta(end);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::logspace_Tensor_Tensor_out::call(start_meta, end_meta, steps, base, out_meta);
      }
      
      at::Tensor start_;
      if (at::functionalization::impl::isFunctionalTensor(start)) {
        at::functionalization::impl::sync(start);
        start_ = at::functionalization::impl::from_functional_tensor(start);
      } else {
        start_ = start;
      }
      
      at::Tensor end_;
      if (at::functionalization::impl::isFunctionalTensor(end)) {
        at::functionalization::impl::sync(end);
        end_ = at::functionalization::impl::from_functional_tensor(end);
      } else {
        end_ = end;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || start.device().type() == c10::DeviceType::XLA || end.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(start) || at::functionalization::impl::isFunctionalTensor(end))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::logspace_Tensor_Tensor_out::call(start_, end_, steps, base, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::logspace_Tensor_Tensor::call(start_, end_, steps, base, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & logspace_out_Tensor_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto start_meta = to_meta(start);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::logspace_Tensor_Scalar_out::call(start_meta, end, steps, base, out_meta);
      }
      
      at::Tensor start_;
      if (at::functionalization::impl::isFunctionalTensor(start)) {
        at::functionalization::impl::sync(start);
        start_ = at::functionalization::impl::from_functional_tensor(start);
      } else {
        start_ = start;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || start.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(start))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::logspace_Tensor_Scalar_out::call(start_, end, steps, base, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::logspace_Tensor_Scalar::call(start_, end, steps, base, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & logspace_out_Scalar_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Tensor & end, int64_t steps, double base, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto end_meta = to_meta(end);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::logspace_Scalar_Tensor_out::call(start, end_meta, steps, base, out_meta);
      }
      
      at::Tensor end_;
      if (at::functionalization::impl::isFunctionalTensor(end)) {
        at::functionalization::impl::sync(end);
        end_ = at::functionalization::impl::from_functional_tensor(end);
      } else {
        end_ = end;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || end.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(end))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::logspace_Scalar_Tensor_out::call(start, end_, steps, base, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::logspace_Scalar_Tensor::call(start, end_, steps, base, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & log_softmax_out_int_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::log_softmax_int_out::call(self_meta, dim, dtype, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::log_softmax_int_out::call(self_, dim, dtype, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::log_softmax_int::call(self_, dim, dtype);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _log_softmax_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_log_softmax_out::call(self_meta, dim, half_to_float, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_log_softmax_out::call(self_, dim, half_to_float, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_log_softmax::call(self_, dim, half_to_float);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _log_softmax_backward_data_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto output_meta = to_meta(output);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_log_softmax_backward_data_out::call(grad_output_meta, output_meta, dim, input_dtype, out_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor output_;
      if (at::functionalization::impl::isFunctionalTensor(output)) {
        at::functionalization::impl::sync(output);
        output_ = at::functionalization::impl::from_functional_tensor(output);
      } else {
        output_ = output;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || output.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(output))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_log_softmax_backward_data_out::call(grad_output_, output_, dim, input_dtype, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_log_softmax_backward_data::call(grad_output_, output_, dim, input_dtype);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _logcumsumexp_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_logcumsumexp_out::call(self_meta, dim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_logcumsumexp_out::call(self_, dim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_logcumsumexp::call(self_, dim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & logcumsumexp_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::logcumsumexp_out::call(self_meta, dim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::logcumsumexp_out::call(self_, dim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::logcumsumexp::call(self_, dim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & logcumsumexp_out_dimname_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::logcumsumexp_dimname_out::call(self_meta, dim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::logcumsumexp_dimname_out::call(self_, dim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::logcumsumexp_dimname::call(self_, dim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & logsumexp_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::logsumexp_out::call(self_meta, dim, keepdim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::logsumexp_out::call(self_, dim, keepdim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::logsumexp::call(self_, dim, keepdim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & logsumexp_out_names_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool keepdim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::logsumexp_names_out::call(self_meta, dim, keepdim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::logsumexp_names_out::call(self_, dim, keepdim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::logsumexp_names::call(self_, dim, keepdim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & matmul_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::matmul_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::matmul_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::matmul::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> matmul_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array<bool,2> mask, at::Tensor & out0, at::Tensor & out1) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_meta = to_meta(grad);
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::matmul_backward_out::call(grad_meta, self_meta, other_meta, mask, out0_meta, out1_meta);
      }
      
      at::Tensor grad_;
      if (at::functionalization::impl::isFunctionalTensor(grad)) {
        at::functionalization::impl::sync(grad);
        grad_ = at::functionalization::impl::from_functional_tensor(grad);
      } else {
        grad_ = grad;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::matmul_backward_out::call(grad_, self_, other_, mask, out0_, out1_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
        }
      } else {
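        // Multi-output out= op: the functional variant returns a tuple, and each element is
        // committed back into its corresponding output wrapper (out0, out1) separately below.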
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::matmul_backward::call(grad_, self_, other_, mask);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
      }
    }

    at::Tensor & matrix_power_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::matrix_power_out::call(self_meta, n, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::matrix_power_out::call(self_, n, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::matrix_power::call(self_, n);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> _aminmax_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out0, at::Tensor & out1) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_aminmax_out::call(self_meta, out0_meta, out1_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::_aminmax_out::call(self_, out0_, out1_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_aminmax::call(self_);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> _aminmax_out_dim_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out0, at::Tensor & out1) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_aminmax_dim_out::call(self_meta, dim, keepdim, out0_meta, out1_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::_aminmax_dim_out::call(self_, dim, keepdim, out0_, out1_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_aminmax_dim::call(self_, dim, keepdim);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> aminmax_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim, at::Tensor & min, at::Tensor & max) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto min_meta = to_meta(min);
        auto max_meta = to_meta(max);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::aminmax_out::call(self_meta, dim, keepdim, min_meta, max_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor min_;
      if (at::functionalization::impl::isFunctionalTensor(min)) {
        at::functionalization::impl::sync(min);
        min_ = at::functionalization::impl::from_functional_tensor(min);
      } else {
        min_ = min;
      }
      
      at::Tensor max_;
      if (at::functionalization::impl::isFunctionalTensor(max)) {
        at::functionalization::impl::sync(max);
        max_ = at::functionalization::impl::from_functional_tensor(max);
      } else {
        max_ = max;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(min) && at::functionalization::impl::isFunctionalTensor(max))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::aminmax_out::call(self_, dim, keepdim, min_, max_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(min, max);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::aminmax::call(self_, dim, keepdim);
        }
        auto min_inner = at::functionalization::impl::from_functional_tensor(min);
        at::functionalization::impl::replace_(min, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(min);
        at::functionalization::impl::sync(min);
        auto min_inner_updated = at::functionalization::impl::from_functional_tensor(min);
        at::functionalization::impl::propagate_xla_data_direct(min_inner, min_inner_updated);
        auto max_inner = at::functionalization::impl::from_functional_tensor(max);
        at::functionalization::impl::replace_(max, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(max);
        at::functionalization::impl::sync(max);
        auto max_inner_updated = at::functionalization::impl::from_functional_tensor(max);
        at::functionalization::impl::propagate_xla_data_direct(max_inner, max_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(min, max);
      }
    }
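
    // NOTE [editorial: the functionalization pattern for out= kernels]
    // Every kernel in this section follows the same generated shape. A minimal sketch of the
    // interesting branch, assuming a hypothetical single-output op `foo.out` (not a real
    // operator in this file):
    //
    //   // 1. unwrap any FunctionalTensorWrapper arguments
    //   at::functionalization::impl::sync(self);
    //   at::Tensor self_ = at::functionalization::impl::from_functional_tensor(self);
    //   // 2. if `out` is not a functional tensor, just redispatch to the original foo.out
    //   // 3. otherwise run the *functional* variant and write its result back into `out`
    //   at::Tensor tmp_output = at::_ops::foo::call(self_);
    //   at::functionalization::impl::replace_(out, tmp_output);   // point `out` at the new value
    //   at::functionalization::impl::commit_update(out);          // record the mutation
    //   at::functionalization::impl::sync(out);                   // make the update visible
    //
    // Multi-output ops (such as aminmax.out above) repeat step 3 once per output, pulling the
    // pieces out of the returned tuple with std::get<i>.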

    at::Tensor & _compute_linear_combination_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & coefficients, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto coefficients_meta = to_meta(coefficients);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_compute_linear_combination_out::call(input_meta, coefficients_meta, out_meta);
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      at::Tensor coefficients_;
      if (at::functionalization::impl::isFunctionalTensor(coefficients)) {
        at::functionalization::impl::sync(coefficients);
        coefficients_ = at::functionalization::impl::from_functional_tensor(coefficients);
      } else {
        coefficients_ = coefficients;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA || coefficients.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(coefficients))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_compute_linear_combination_out::call(input_, coefficients_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_compute_linear_combination::call(input_, coefficients_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
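
    // NOTE [editorial: the statically disabled meta reference run]
    // Each kernel opens with `if (false && !disable_meta_reference())`. For every op in this
    // section the leading `false` compiles the meta-tensor replay out, but the body is still
    // emitted so that ops which do opt in can first replay the original mutable op on meta
    // tensors. That replay surfaces shape errors against the user-provided out= tensor that the
    // functional variant (which allocates its own output) would not catch, and the
    // AutoDispatchSkipFunctionalize / ExcludeDispatchKeyGuard guards keep it from re-entering
    // functionalization while doing so.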

    ::std::tuple<at::Tensor &,at::Tensor &> max_out_dim_max(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & max, at::Tensor & max_values) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto max_meta = to_meta(max);
        auto max_values_meta = to_meta(max_values);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::max_dim_max::call(self_meta, dim, keepdim, max_meta, max_values_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor max_;
      if (at::functionalization::impl::isFunctionalTensor(max)) {
        at::functionalization::impl::sync(max);
        max_ = at::functionalization::impl::from_functional_tensor(max);
      } else {
        max_ = max;
      }
      
      at::Tensor max_values_;
      if (at::functionalization::impl::isFunctionalTensor(max_values)) {
        at::functionalization::impl::sync(max_values);
        max_values_ = at::functionalization::impl::from_functional_tensor(max_values);
      } else {
        max_values_ = max_values;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(max) && at::functionalization::impl::isFunctionalTensor(max_values))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::max_dim_max::call(self_, dim, keepdim, max_, max_values_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(max, max_values);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::max_dim::call(self_, dim, keepdim);
        }
        auto max_inner = at::functionalization::impl::from_functional_tensor(max);
        at::functionalization::impl::replace_(max, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(max);
        at::functionalization::impl::sync(max);
        auto max_inner_updated = at::functionalization::impl::from_functional_tensor(max);
        at::functionalization::impl::propagate_xla_data_direct(max_inner, max_inner_updated);
        auto max_values_inner = at::functionalization::impl::from_functional_tensor(max_values);
        at::functionalization::impl::replace_(max_values, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(max_values);
        at::functionalization::impl::sync(max_values);
        auto max_values_inner_updated = at::functionalization::impl::from_functional_tensor(max_values);
        at::functionalization::impl::propagate_xla_data_direct(max_values_inner, max_values_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(max, max_values);
      }
    }
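
    // NOTE [editorial: the non-functional output branch]
    // When the out= arguments are not functional tensors, the kernels distinguish two cases.
    // If any data input *is* a functional tensor (and no XLA tensor is involved), the mutation
    // cannot be represented by the functionalization pass, so the kernel asserts and asks the
    // caller to wrap all inputs in a functionalize() call. The XLA exemption exists because
    // mixed-device copies such as cpu_tensor.copy_(xla_tensor) are considered valid. If nothing
    // is functional at all, the kernel simply redispatches to the original mutable op under
    // AutoDispatchSkipFunctionalize, so eager behaviour is unchanged.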

    ::std::tuple<at::Tensor &,at::Tensor &> max_out_names_dim_max(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & max, at::Tensor & max_values) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto max_meta = to_meta(max);
        auto max_values_meta = to_meta(max_values);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::max_names_dim_max::call(self_meta, dim, keepdim, max_meta, max_values_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor max_;
      if (at::functionalization::impl::isFunctionalTensor(max)) {
        at::functionalization::impl::sync(max);
        max_ = at::functionalization::impl::from_functional_tensor(max);
      } else {
        max_ = max;
      }
      
      at::Tensor max_values_;
      if (at::functionalization::impl::isFunctionalTensor(max_values)) {
        at::functionalization::impl::sync(max_values);
        max_values_ = at::functionalization::impl::from_functional_tensor(max_values);
      } else {
        max_values_ = max_values;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(max) && at::functionalization::impl::isFunctionalTensor(max_values))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::max_names_dim_max::call(self_, dim, keepdim, max_, max_values_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(max, max_values);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::max_names_dim::call(self_, dim, keepdim);
        }
        auto max_inner = at::functionalization::impl::from_functional_tensor(max);
        at::functionalization::impl::replace_(max, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(max);
        at::functionalization::impl::sync(max);
        auto max_inner_updated = at::functionalization::impl::from_functional_tensor(max);
        at::functionalization::impl::propagate_xla_data_direct(max_inner, max_inner_updated);
        auto max_values_inner = at::functionalization::impl::from_functional_tensor(max_values);
        at::functionalization::impl::replace_(max_values, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(max_values);
        at::functionalization::impl::sync(max_values);
        auto max_values_inner_updated = at::functionalization::impl::from_functional_tensor(max_values);
        at::functionalization::impl::propagate_xla_data_direct(max_values_inner, max_values_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(max, max_values);
      }
    }

    at::Tensor & amax_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::amax_out::call(self_meta, dim, keepdim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::amax_out::call(self_, dim, keepdim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::amax::call(self_, dim, keepdim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & max_pool2d_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::max_pool2d_backward_out::call(grad_output_meta, self_meta, kernel_size, stride, padding, dilation, ceil_mode, out_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::max_pool2d_backward_out::call(grad_output_, self_, kernel_size, stride, padding, dilation, ceil_mode, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::max_pool2d_backward::call(grad_output_, self_, kernel_size, stride, padding, dilation, ceil_mode);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
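
    // NOTE [editorial: propagate_xla_data_direct]
    // After replace_/commit_update/sync, each kernel re-unwraps the output and calls
    // propagate_xla_data_direct(old_inner, new_inner). The intent, as far as this file shows,
    // is to hand backend-specific state from the tensor that previously backed the functional
    // wrapper over to the freshly produced value, so XLA/lazy-tensor backends still observe the
    // out= op as an in-place update; for ordinary eager tensors the call should have no effect.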

    at::Tensor & mkldnn_max_pool2d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::mkldnn_max_pool2d_out::call(self_meta, kernel_size, stride, padding, dilation, ceil_mode, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::mkldnn_max_pool2d_out::call(self_, kernel_size, stride, padding, dilation, ceil_mode, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::mkldnn_max_pool2d::call(self_, kernel_size, stride, padding, dilation, ceil_mode);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & mkldnn_max_pool2d_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto output_meta = to_meta(output);
        auto input_meta = to_meta(input);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::mkldnn_max_pool2d_backward_out::call(grad_output_meta, output_meta, input_meta, kernel_size, stride, padding, dilation, ceil_mode, out_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor output_;
      if (at::functionalization::impl::isFunctionalTensor(output)) {
        at::functionalization::impl::sync(output);
        output_ = at::functionalization::impl::from_functional_tensor(output);
      } else {
        output_ = output;
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || output.device().type() == c10::DeviceType::XLA || input.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(output) || at::functionalization::impl::isFunctionalTensor(input))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::mkldnn_max_pool2d_backward_out::call(grad_output_, output_, input_, kernel_size, stride, padding, dilation, ceil_mode, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::mkldnn_max_pool2d_backward::call(grad_output_, output_, input_, kernel_size, stride, padding, dilation, ceil_mode);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & mkldnn_max_pool3d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::mkldnn_max_pool3d_out::call(self_meta, kernel_size, stride, padding, dilation, ceil_mode, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::mkldnn_max_pool3d_out::call(self_, kernel_size, stride, padding, dilation, ceil_mode, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::mkldnn_max_pool3d::call(self_, kernel_size, stride, padding, dilation, ceil_mode);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & mkldnn_max_pool3d_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto output_meta = to_meta(output);
        auto input_meta = to_meta(input);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::mkldnn_max_pool3d_backward_out::call(grad_output_meta, output_meta, input_meta, kernel_size, stride, padding, dilation, ceil_mode, out_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor output_;
      if (at::functionalization::impl::isFunctionalTensor(output)) {
        at::functionalization::impl::sync(output);
        output_ = at::functionalization::impl::from_functional_tensor(output);
      } else {
        output_ = output;
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || output.device().type() == c10::DeviceType::XLA || input.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(output) || at::functionalization::impl::isFunctionalTensor(input))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::mkldnn_max_pool3d_backward_out::call(grad_output_, output_, input_, kernel_size, stride, padding, dilation, ceil_mode, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::mkldnn_max_pool3d_backward::call(grad_output_, output_, input_, kernel_size, stride, padding, dilation, ceil_mode);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & quantized_max_pool1d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::quantized_max_pool1d_out::call(self_meta, kernel_size, stride, padding, dilation, ceil_mode, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::quantized_max_pool1d_out::call(self_, kernel_size, stride, padding, dilation, ceil_mode, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::quantized_max_pool1d::call(self_, kernel_size, stride, padding, dilation, ceil_mode);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & quantized_max_pool2d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::quantized_max_pool2d_out::call(self_meta, kernel_size, stride, padding, dilation, ceil_mode, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::quantized_max_pool2d_out::call(self_, kernel_size, stride, padding, dilation, ceil_mode, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::quantized_max_pool2d::call(self_, kernel_size, stride, padding, dilation, ceil_mode);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & quantized_max_pool3d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::quantized_max_pool3d_out::call(self_meta, kernel_size, stride, padding, dilation, ceil_mode, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::quantized_max_pool3d_out::call(self_, kernel_size, stride, padding, dilation, ceil_mode, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::quantized_max_pool3d::call(self_, kernel_size, stride, padding, dilation, ceil_mode);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & mean_out_dtype_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::mean_dtype_out::call(self_meta, dtype, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::mean_dtype_out::call(self_, dtype, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::mean::call(self_, dtype);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & mean_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::mean_out::call(self_meta, dim, keepdim, dtype, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::mean_out::call(self_, dim, keepdim, dtype, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::mean_dim::call(self_, dim, keepdim, dtype);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & mean_out_names_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::mean_names_out::call(self_meta, dim, keepdim, dtype, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::mean_names_out::call(self_, dim, keepdim, dtype, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::mean_names_dim::call(self_, dim, keepdim, dtype);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & nanmean_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::nanmean_out::call(self_meta, dim, keepdim, dtype, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::nanmean_out::call(self_, dim, keepdim, dtype, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::nanmean::call(self_, dim, keepdim, dtype);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & median_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::median_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::median_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::median::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

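    // For overloads with multiple out= tensors (values/indices, min/min_indices, out0..outN),
    // the functional recompute returns a tuple and each output is written back independently
    // with std::get<i>(tmp_output); the returned references are the original (possibly wrapped)
    // arguments, not the unwrapped tensors.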
    ::std::tuple<at::Tensor &,at::Tensor &> median_out_dim_values(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto values_meta = to_meta(values);
        auto indices_meta = to_meta(indices);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::median_dim_values::call(self_meta, dim, keepdim, values_meta, indices_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor values_;
      if (at::functionalization::impl::isFunctionalTensor(values)) {
        at::functionalization::impl::sync(values);
        values_ = at::functionalization::impl::from_functional_tensor(values);
      } else {
        values_ = values;
      }
      
      at::Tensor indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(values) && at::functionalization::impl::isFunctionalTensor(indices))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::median_dim_values::call(self_, dim, keepdim, values_, indices_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::median_dim::call(self_, dim, keepdim);
        }
        auto values_inner = at::functionalization::impl::from_functional_tensor(values);
        at::functionalization::impl::replace_(values, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(values);
        at::functionalization::impl::sync(values);
        auto values_inner_updated = at::functionalization::impl::from_functional_tensor(values);
        at::functionalization::impl::propagate_xla_data_direct(values_inner, values_inner_updated);
        auto indices_inner = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::replace_(indices, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(indices);
        at::functionalization::impl::sync(indices);
        auto indices_inner_updated = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::propagate_xla_data_direct(indices_inner, indices_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> median_out_names_dim_values(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto values_meta = to_meta(values);
        auto indices_meta = to_meta(indices);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::median_names_dim_values::call(self_meta, dim, keepdim, values_meta, indices_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor values_;
      if (at::functionalization::impl::isFunctionalTensor(values)) {
        at::functionalization::impl::sync(values);
        values_ = at::functionalization::impl::from_functional_tensor(values);
      } else {
        values_ = values;
      }
      
      at::Tensor indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(values) && at::functionalization::impl::isFunctionalTensor(indices))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::median_names_dim_values::call(self_, dim, keepdim, values_, indices_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::median_names_dim::call(self_, dim, keepdim);
        }
        auto values_inner = at::functionalization::impl::from_functional_tensor(values);
        at::functionalization::impl::replace_(values, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(values);
        at::functionalization::impl::sync(values);
        auto values_inner_updated = at::functionalization::impl::from_functional_tensor(values);
        at::functionalization::impl::propagate_xla_data_direct(values_inner, values_inner_updated);
        auto indices_inner = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::replace_(indices, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(indices);
        at::functionalization::impl::sync(indices);
        auto indices_inner_updated = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::propagate_xla_data_direct(indices_inner, indices_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
      }
    }

    at::Tensor & nanmedian_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::nanmedian_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::nanmedian_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::nanmedian::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> nanmedian_out_dim_values(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto values_meta = to_meta(values);
        auto indices_meta = to_meta(indices);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::nanmedian_dim_values::call(self_meta, dim, keepdim, values_meta, indices_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor values_;
      if (at::functionalization::impl::isFunctionalTensor(values)) {
        at::functionalization::impl::sync(values);
        values_ = at::functionalization::impl::from_functional_tensor(values);
      } else {
        values_ = values;
      }
      
      at::Tensor indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(values) && at::functionalization::impl::isFunctionalTensor(indices))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::nanmedian_dim_values::call(self_, dim, keepdim, values_, indices_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::nanmedian_dim::call(self_, dim, keepdim);
        }
        auto values_inner = at::functionalization::impl::from_functional_tensor(values);
        at::functionalization::impl::replace_(values, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(values);
        at::functionalization::impl::sync(values);
        auto values_inner_updated = at::functionalization::impl::from_functional_tensor(values);
        at::functionalization::impl::propagate_xla_data_direct(values_inner, values_inner_updated);
        auto indices_inner = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::replace_(indices, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(indices);
        at::functionalization::impl::sync(indices);
        auto indices_inner_updated = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::propagate_xla_data_direct(indices_inner, indices_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> nanmedian_out_names_dim_values(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto values_meta = to_meta(values);
        auto indices_meta = to_meta(indices);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::nanmedian_names_dim_values::call(self_meta, dim, keepdim, values_meta, indices_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor values_;
      if (at::functionalization::impl::isFunctionalTensor(values)) {
        at::functionalization::impl::sync(values);
        values_ = at::functionalization::impl::from_functional_tensor(values);
      } else {
        values_ = values;
      }
      
      at::Tensor indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(values) && at::functionalization::impl::isFunctionalTensor(indices))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::nanmedian_names_dim_values::call(self_, dim, keepdim, values_, indices_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::nanmedian_names_dim::call(self_, dim, keepdim);
        }
        auto values_inner = at::functionalization::impl::from_functional_tensor(values);
        at::functionalization::impl::replace_(values, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(values);
        at::functionalization::impl::sync(values);
        auto values_inner_updated = at::functionalization::impl::from_functional_tensor(values);
        at::functionalization::impl::propagate_xla_data_direct(values_inner, values_inner_updated);
        auto indices_inner = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::replace_(indices, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(indices);
        at::functionalization::impl::sync(indices);
        auto indices_inner_updated = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::propagate_xla_data_direct(indices_inner, indices_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> min_out_dim_min(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & min, at::Tensor & min_indices) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto min_meta = to_meta(min);
        auto min_indices_meta = to_meta(min_indices);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::min_dim_min::call(self_meta, dim, keepdim, min_meta, min_indices_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor min_;
      if (at::functionalization::impl::isFunctionalTensor(min)) {
        at::functionalization::impl::sync(min);
        min_ = at::functionalization::impl::from_functional_tensor(min);
      } else {
        min_ = min;
      }
      
      at::Tensor min_indices_;
      if (at::functionalization::impl::isFunctionalTensor(min_indices)) {
        at::functionalization::impl::sync(min_indices);
        min_indices_ = at::functionalization::impl::from_functional_tensor(min_indices);
      } else {
        min_indices_ = min_indices;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(min) && at::functionalization::impl::isFunctionalTensor(min_indices))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::min_dim_min::call(self_, dim, keepdim, min_, min_indices_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(min, min_indices);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::min_dim::call(self_, dim, keepdim);
        }
        auto min_inner = at::functionalization::impl::from_functional_tensor(min);
        at::functionalization::impl::replace_(min, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(min);
        at::functionalization::impl::sync(min);
        auto min_inner_updated = at::functionalization::impl::from_functional_tensor(min);
        at::functionalization::impl::propagate_xla_data_direct(min_inner, min_inner_updated);
        auto min_indices_inner = at::functionalization::impl::from_functional_tensor(min_indices);
        at::functionalization::impl::replace_(min_indices, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(min_indices);
        at::functionalization::impl::sync(min_indices);
        auto min_indices_inner_updated = at::functionalization::impl::from_functional_tensor(min_indices);
        at::functionalization::impl::propagate_xla_data_direct(min_indices_inner, min_indices_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(min, min_indices);
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> min_out_names_dim_min(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & min, at::Tensor & min_indices) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto min_meta = to_meta(min);
        auto min_indices_meta = to_meta(min_indices);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::min_names_dim_min::call(self_meta, dim, keepdim, min_meta, min_indices_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor min_;
      if (at::functionalization::impl::isFunctionalTensor(min)) {
        at::functionalization::impl::sync(min);
        min_ = at::functionalization::impl::from_functional_tensor(min);
      } else {
        min_ = min;
      }
      
      at::Tensor min_indices_;
      if (at::functionalization::impl::isFunctionalTensor(min_indices)) {
        at::functionalization::impl::sync(min_indices);
        min_indices_ = at::functionalization::impl::from_functional_tensor(min_indices);
      } else {
        min_indices_ = min_indices;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(min) && at::functionalization::impl::isFunctionalTensor(min_indices))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::min_names_dim_min::call(self_, dim, keepdim, min_, min_indices_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(min, min_indices);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::min_names_dim::call(self_, dim, keepdim);
        }
        auto min_inner = at::functionalization::impl::from_functional_tensor(min);
        at::functionalization::impl::replace_(min, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(min);
        at::functionalization::impl::sync(min);
        auto min_inner_updated = at::functionalization::impl::from_functional_tensor(min);
        at::functionalization::impl::propagate_xla_data_direct(min_inner, min_inner_updated);
        auto min_indices_inner = at::functionalization::impl::from_functional_tensor(min_indices);
        at::functionalization::impl::replace_(min_indices, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(min_indices);
        at::functionalization::impl::sync(min_indices);
        auto min_indices_inner_updated = at::functionalization::impl::from_functional_tensor(min_indices);
        at::functionalization::impl::propagate_xla_data_direct(min_indices_inner, min_indices_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(min, min_indices);
      }
    }

    at::Tensor & amin_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::amin_out::call(self_meta, dim, keepdim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::amin_out::call(self_, dim, keepdim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::amin::call(self_, dim, keepdim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

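    // For ops that take several tensor inputs (e.g. self/weight/bias below), the case-1 check
    // ORs the XLA-device test and the isFunctionalTensor test across every input, and optional
    // tensors are unwrapped into ::std::optional<at::Tensor> before redispatch.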
    at::Tensor & _mps_convolution_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto weight_meta = to_meta(weight);
        auto bias_meta = to_meta(bias);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_mps_convolution_out::call(self_meta, weight_meta, bias_meta, padding, stride, dilation, groups, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      ::std::optional<at::Tensor> bias_;
      if (at::functionalization::impl::isFunctionalTensor(bias)) {
        at::functionalization::impl::sync(bias);
        bias_ = at::functionalization::impl::from_functional_tensor(bias);
      } else {
        bias_ = bias;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_mps_convolution_out::call(self_, weight_, bias_, padding, stride, dilation, groups, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_mps_convolution::call(self_, weight_, bias_, padding, stride, dilation, groups);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> mps_convolution_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto grad_output_meta = to_meta(grad_output);
        auto weight_meta = to_meta(weight);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        auto out2_meta = to_meta(out2);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::mps_convolution_backward_out::call(self_meta, grad_output_meta, weight_meta, padding, stride, dilation, groups, output_mask, out0_meta, out1_meta, out2_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      
      at::Tensor out2_;
      if (at::functionalization::impl::isFunctionalTensor(out2)) {
        at::functionalization::impl::sync(out2);
        out2_ = at::functionalization::impl::from_functional_tensor(out2);
      } else {
        out2_ = out2;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || grad_output.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(weight))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::mps_convolution_backward_out::call(self_, grad_output_, weight_, padding, stride, dilation, groups, output_mask, out0_, out1_, out2_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::mps_convolution_backward::call(self_, grad_output_, weight_, padding, stride, dilation, groups, output_mask);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        auto out2_inner = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(out2);
        at::functionalization::impl::sync(out2);
        auto out2_inner_updated = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::propagate_xla_data_direct(out2_inner, out2_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
      }
    }

    at::Tensor & mkldnn_convolution_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto weight_meta = to_meta(weight);
        auto bias_meta = to_meta(bias);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::mkldnn_convolution_out::call(self_meta, weight_meta, bias_meta, padding, stride, dilation, groups, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      ::std::optional<at::Tensor> bias_;
      if (at::functionalization::impl::isFunctionalTensor(bias)) {
        at::functionalization::impl::sync(bias);
        bias_ = at::functionalization::impl::from_functional_tensor(bias);
      } else {
        bias_ = bias;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::mkldnn_convolution_out::call(self_, weight_, bias_, padding, stride, dilation, groups, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::mkldnn_convolution::call(self_, weight_, bias_, padding, stride, dilation, groups);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> mkldnn_rnn_layer_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight0, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & hx_, const at::Tensor & cx_, bool reverse, at::IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto weight0_meta = to_meta(weight0);
        auto weight1_meta = to_meta(weight1);
        auto weight2_meta = to_meta(weight2);
        auto weight3_meta = to_meta(weight3);
        auto hx__meta = to_meta(hx_);
        auto cx__meta = to_meta(cx_);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        auto out2_meta = to_meta(out2);
        auto out3_meta = to_meta(out3);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::mkldnn_rnn_layer_out::call(input_meta, weight0_meta, weight1_meta, weight2_meta, weight3_meta, hx__meta, cx__meta, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train, out0_meta, out1_meta, out2_meta, out3_meta);
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      at::Tensor weight0_;
      if (at::functionalization::impl::isFunctionalTensor(weight0)) {
        at::functionalization::impl::sync(weight0);
        weight0_ = at::functionalization::impl::from_functional_tensor(weight0);
      } else {
        weight0_ = weight0;
      }
      
      at::Tensor weight1_;
      if (at::functionalization::impl::isFunctionalTensor(weight1)) {
        at::functionalization::impl::sync(weight1);
        weight1_ = at::functionalization::impl::from_functional_tensor(weight1);
      } else {
        weight1_ = weight1;
      }
      
      at::Tensor weight2_;
      if (at::functionalization::impl::isFunctionalTensor(weight2)) {
        at::functionalization::impl::sync(weight2);
        weight2_ = at::functionalization::impl::from_functional_tensor(weight2);
      } else {
        weight2_ = weight2;
      }
      
      at::Tensor weight3_;
      if (at::functionalization::impl::isFunctionalTensor(weight3)) {
        at::functionalization::impl::sync(weight3);
        weight3_ = at::functionalization::impl::from_functional_tensor(weight3);
      } else {
        weight3_ = weight3;
      }
      
      at::Tensor hx__;
      if (at::functionalization::impl::isFunctionalTensor(hx_)) {
        at::functionalization::impl::sync(hx_);
        hx__ = at::functionalization::impl::from_functional_tensor(hx_);
      } else {
        hx__ = hx_;
      }
      
      at::Tensor cx__;
      if (at::functionalization::impl::isFunctionalTensor(cx_)) {
        at::functionalization::impl::sync(cx_);
        cx__ = at::functionalization::impl::from_functional_tensor(cx_);
      } else {
        cx__ = cx_;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      
      at::Tensor out2_;
      if (at::functionalization::impl::isFunctionalTensor(out2)) {
        at::functionalization::impl::sync(out2);
        out2_ = at::functionalization::impl::from_functional_tensor(out2);
      } else {
        out2_ = out2;
      }
      
      at::Tensor out3_;
      if (at::functionalization::impl::isFunctionalTensor(out3)) {
        at::functionalization::impl::sync(out3);
        out3_ = at::functionalization::impl::from_functional_tensor(out3);
      } else {
        out3_ = out3;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2) && at::functionalization::impl::isFunctionalTensor(out3))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA || weight0.device().type() == c10::DeviceType::XLA || weight1.device().type() == c10::DeviceType::XLA || weight2.device().type() == c10::DeviceType::XLA || weight3.device().type() == c10::DeviceType::XLA || hx_.device().type() == c10::DeviceType::XLA || cx_.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight0) || at::functionalization::impl::isFunctionalTensor(weight1) || at::functionalization::impl::isFunctionalTensor(weight2) || at::functionalization::impl::isFunctionalTensor(weight3) || at::functionalization::impl::isFunctionalTensor(hx_) || at::functionalization::impl::isFunctionalTensor(cx_))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::mkldnn_rnn_layer_out::call(input_, weight0_, weight1_, weight2_, weight3_, hx__, cx__, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train, out0_, out1_, out2_, out3_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::mkldnn_rnn_layer::call(input_, weight0_, weight1_, weight2_, weight3_, hx__, cx__, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        auto out2_inner = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(out2);
        at::functionalization::impl::sync(out2);
        auto out2_inner_updated = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::propagate_xla_data_direct(out2_inner, out2_inner_updated);
        auto out3_inner = at::functionalization::impl::from_functional_tensor(out3);
        at::functionalization::impl::replace_(out3, std::get<3>(tmp_output));
        at::functionalization::impl::commit_update(out3);
        at::functionalization::impl::sync(out3);
        auto out3_inner_updated = at::functionalization::impl::from_functional_tensor(out3);
        at::functionalization::impl::propagate_xla_data_direct(out3_inner, out3_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3);
      }
    }
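
    // A minimal, hand-written sketch (not produced by torchgen) of the per-output
    // write-back step that the generated kernels in this file repeat inline for
    // every out tensor. It only uses the impl:: helpers exactly as they appear in
    // the surrounding code; the function name is illustrative.
    [[maybe_unused]] static void writeback_one_output_sketch(const at::Tensor & out, const at::Tensor & tmp_output) {
      // Remember the current inner tensor so XLA-side data can be carried over afterwards.
      auto out_inner = at::functionalization::impl::from_functional_tensor(out);
      // Point the functional wrapper at the freshly computed value ...
      at::functionalization::impl::replace_(out, tmp_output);
      // ... and commit/sync so the mutation becomes visible through aliases of `out`.
      at::functionalization::impl::commit_update(out);
      at::functionalization::impl::sync(out);
      auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
      at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
    }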

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> mkldnn_rnn_layer_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & weight4, const at::Tensor & hx_, const at::Tensor & cx_tmp, const at::Tensor & output, const at::Tensor & hy_, const at::Tensor & cy_, const ::std::optional<at::Tensor> & grad_output, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor & workspace, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, at::Tensor & out5, at::Tensor & out6) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto weight1_meta = to_meta(weight1);
        auto weight2_meta = to_meta(weight2);
        auto weight3_meta = to_meta(weight3);
        auto weight4_meta = to_meta(weight4);
        auto hx__meta = to_meta(hx_);
        auto cx_tmp_meta = to_meta(cx_tmp);
        auto output_meta = to_meta(output);
        auto hy__meta = to_meta(hy_);
        auto cy__meta = to_meta(cy_);
        auto grad_output_meta = to_meta(grad_output);
        auto grad_hy_meta = to_meta(grad_hy);
        auto grad_cy_meta = to_meta(grad_cy);
        auto workspace_meta = to_meta(workspace);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        auto out2_meta = to_meta(out2);
        auto out3_meta = to_meta(out3);
        auto out4_meta = to_meta(out4);
        auto out5_meta = to_meta(out5);
        auto out6_meta = to_meta(out6);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::mkldnn_rnn_layer_backward_out::call(input_meta, weight1_meta, weight2_meta, weight3_meta, weight4_meta, hx__meta, cx_tmp_meta, output_meta, hy__meta, cy__meta, grad_output_meta, grad_hy_meta, grad_cy_meta, reverse, mode, hidden_size, num_layers, has_biases, train, bidirectional, batch_sizes, batch_first, workspace_meta, out0_meta, out1_meta, out2_meta, out3_meta, out4_meta, out5_meta, out6_meta);
      }
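      // What follows unwraps every argument: functional tensors are synced and
      // replaced by their inner tensors so the op can be redispatched below the
      // Functionalize key; plain tensors pass through unchanged.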
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      at::Tensor weight1_;
      if (at::functionalization::impl::isFunctionalTensor(weight1)) {
        at::functionalization::impl::sync(weight1);
        weight1_ = at::functionalization::impl::from_functional_tensor(weight1);
      } else {
        weight1_ = weight1;
      }
      
      at::Tensor weight2_;
      if (at::functionalization::impl::isFunctionalTensor(weight2)) {
        at::functionalization::impl::sync(weight2);
        weight2_ = at::functionalization::impl::from_functional_tensor(weight2);
      } else {
        weight2_ = weight2;
      }
      
      at::Tensor weight3_;
      if (at::functionalization::impl::isFunctionalTensor(weight3)) {
        at::functionalization::impl::sync(weight3);
        weight3_ = at::functionalization::impl::from_functional_tensor(weight3);
      } else {
        weight3_ = weight3;
      }
      
      at::Tensor weight4_;
      if (at::functionalization::impl::isFunctionalTensor(weight4)) {
        at::functionalization::impl::sync(weight4);
        weight4_ = at::functionalization::impl::from_functional_tensor(weight4);
      } else {
        weight4_ = weight4;
      }
      
      at::Tensor hx__;
      if (at::functionalization::impl::isFunctionalTensor(hx_)) {
        at::functionalization::impl::sync(hx_);
        hx__ = at::functionalization::impl::from_functional_tensor(hx_);
      } else {
        hx__ = hx_;
      }
      
      at::Tensor cx_tmp_;
      if (at::functionalization::impl::isFunctionalTensor(cx_tmp)) {
        at::functionalization::impl::sync(cx_tmp);
        cx_tmp_ = at::functionalization::impl::from_functional_tensor(cx_tmp);
      } else {
        cx_tmp_ = cx_tmp;
      }
      
      at::Tensor output_;
      if (at::functionalization::impl::isFunctionalTensor(output)) {
        at::functionalization::impl::sync(output);
        output_ = at::functionalization::impl::from_functional_tensor(output);
      } else {
        output_ = output;
      }
      
      at::Tensor hy__;
      if (at::functionalization::impl::isFunctionalTensor(hy_)) {
        at::functionalization::impl::sync(hy_);
        hy__ = at::functionalization::impl::from_functional_tensor(hy_);
      } else {
        hy__ = hy_;
      }
      
      at::Tensor cy__;
      if (at::functionalization::impl::isFunctionalTensor(cy_)) {
        at::functionalization::impl::sync(cy_);
        cy__ = at::functionalization::impl::from_functional_tensor(cy_);
      } else {
        cy__ = cy_;
      }
      
      ::std::optional<at::Tensor> grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
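      // Optional tensor arguments take the same sync/unwrap path; the impl::
      // helpers are called here with ::std::optional<at::Tensor> directly.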
      
      ::std::optional<at::Tensor> grad_hy_;
      if (at::functionalization::impl::isFunctionalTensor(grad_hy)) {
        at::functionalization::impl::sync(grad_hy);
        grad_hy_ = at::functionalization::impl::from_functional_tensor(grad_hy);
      } else {
        grad_hy_ = grad_hy;
      }
      
      ::std::optional<at::Tensor> grad_cy_;
      if (at::functionalization::impl::isFunctionalTensor(grad_cy)) {
        at::functionalization::impl::sync(grad_cy);
        grad_cy_ = at::functionalization::impl::from_functional_tensor(grad_cy);
      } else {
        grad_cy_ = grad_cy;
      }
      
      at::Tensor workspace_;
      if (at::functionalization::impl::isFunctionalTensor(workspace)) {
        at::functionalization::impl::sync(workspace);
        workspace_ = at::functionalization::impl::from_functional_tensor(workspace);
      } else {
        workspace_ = workspace;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      
      at::Tensor out2_;
      if (at::functionalization::impl::isFunctionalTensor(out2)) {
        at::functionalization::impl::sync(out2);
        out2_ = at::functionalization::impl::from_functional_tensor(out2);
      } else {
        out2_ = out2;
      }
      
      at::Tensor out3_;
      if (at::functionalization::impl::isFunctionalTensor(out3)) {
        at::functionalization::impl::sync(out3);
        out3_ = at::functionalization::impl::from_functional_tensor(out3);
      } else {
        out3_ = out3;
      }
      
      at::Tensor out4_;
      if (at::functionalization::impl::isFunctionalTensor(out4)) {
        at::functionalization::impl::sync(out4);
        out4_ = at::functionalization::impl::from_functional_tensor(out4);
      } else {
        out4_ = out4;
      }
      
      at::Tensor out5_;
      if (at::functionalization::impl::isFunctionalTensor(out5)) {
        at::functionalization::impl::sync(out5);
        out5_ = at::functionalization::impl::from_functional_tensor(out5);
      } else {
        out5_ = out5;
      }
      
      at::Tensor out6_;
      if (at::functionalization::impl::isFunctionalTensor(out6)) {
        at::functionalization::impl::sync(out6);
        out6_ = at::functionalization::impl::from_functional_tensor(out6);
      } else {
        out6_ = out6;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2) && at::functionalization::impl::isFunctionalTensor(out3) && at::functionalization::impl::isFunctionalTensor(out4) && at::functionalization::impl::isFunctionalTensor(out5) && at::functionalization::impl::isFunctionalTensor(out6))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA || weight1.device().type() == c10::DeviceType::XLA || weight2.device().type() == c10::DeviceType::XLA || weight3.device().type() == c10::DeviceType::XLA || weight4.device().type() == c10::DeviceType::XLA || hx_.device().type() == c10::DeviceType::XLA || cx_tmp.device().type() == c10::DeviceType::XLA || output.device().type() == c10::DeviceType::XLA || hy_.device().type() == c10::DeviceType::XLA || cy_.device().type() == c10::DeviceType::XLA || workspace.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight1) || at::functionalization::impl::isFunctionalTensor(weight2) || at::functionalization::impl::isFunctionalTensor(weight3) || at::functionalization::impl::isFunctionalTensor(weight4) || at::functionalization::impl::isFunctionalTensor(hx_) || at::functionalization::impl::isFunctionalTensor(cx_tmp) || at::functionalization::impl::isFunctionalTensor(output) || at::functionalization::impl::isFunctionalTensor(hy_) || at::functionalization::impl::isFunctionalTensor(cy_) || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(grad_hy) || at::functionalization::impl::isFunctionalTensor(grad_cy) || at::functionalization::impl::isFunctionalTensor(workspace))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::mkldnn_rnn_layer_backward_out::call(input_, weight1_, weight2_, weight3_, weight4_, hx__, cx_tmp_, output_, hy__, cy__, grad_output_, grad_hy_, grad_cy_, reverse, mode, hidden_size, num_layers, has_biases, train, bidirectional, batch_sizes, batch_first, workspace_, out0_, out1_, out2_, out3_, out4_, out5_, out6_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3, out4, out5, out6);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::mkldnn_rnn_layer_backward::call(input_, weight1_, weight2_, weight3_, weight4_, hx__, cx_tmp_, output_, hy__, cy__, grad_output_, grad_hy_, grad_cy_, reverse, mode, hidden_size, num_layers, has_biases, train, bidirectional, batch_sizes, batch_first, workspace_);
        }
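        // All out tensors are functional here, so the purely functional variant
        // was called above; each result is now written back into the matching
        // out wrapper.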
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        auto out2_inner = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(out2);
        at::functionalization::impl::sync(out2);
        auto out2_inner_updated = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::propagate_xla_data_direct(out2_inner, out2_inner_updated);
        auto out3_inner = at::functionalization::impl::from_functional_tensor(out3);
        at::functionalization::impl::replace_(out3, std::get<3>(tmp_output));
        at::functionalization::impl::commit_update(out3);
        at::functionalization::impl::sync(out3);
        auto out3_inner_updated = at::functionalization::impl::from_functional_tensor(out3);
        at::functionalization::impl::propagate_xla_data_direct(out3_inner, out3_inner_updated);
        auto out4_inner = at::functionalization::impl::from_functional_tensor(out4);
        at::functionalization::impl::replace_(out4, std::get<4>(tmp_output));
        at::functionalization::impl::commit_update(out4);
        at::functionalization::impl::sync(out4);
        auto out4_inner_updated = at::functionalization::impl::from_functional_tensor(out4);
        at::functionalization::impl::propagate_xla_data_direct(out4_inner, out4_inner_updated);
        auto out5_inner = at::functionalization::impl::from_functional_tensor(out5);
        at::functionalization::impl::replace_(out5, std::get<5>(tmp_output));
        at::functionalization::impl::commit_update(out5);
        at::functionalization::impl::sync(out5);
        auto out5_inner_updated = at::functionalization::impl::from_functional_tensor(out5);
        at::functionalization::impl::propagate_xla_data_direct(out5_inner, out5_inner_updated);
        auto out6_inner = at::functionalization::impl::from_functional_tensor(out6);
        at::functionalization::impl::replace_(out6, std::get<6>(tmp_output));
        at::functionalization::impl::commit_update(out6);
        at::functionalization::impl::sync(out6);
        auto out6_inner_updated = at::functionalization::impl::from_functional_tensor(out6);
        at::functionalization::impl::propagate_xla_data_direct(out6_inner, out6_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3, out4, out5, out6);
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> miopen_batch_norm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto weight_meta = to_meta(weight);
        auto bias_meta = to_meta(bias);
        auto running_mean_meta = to_meta(running_mean);
        auto running_var_meta = to_meta(running_var);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        auto out2_meta = to_meta(out2);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::miopen_batch_norm_out::call(input_meta, weight_meta, bias_meta, running_mean_meta, running_var_meta, training, exponential_average_factor, epsilon, out0_meta, out1_meta, out2_meta);
      }
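      // The `false &&` guard above statically disables the meta-tensor reference
      // run for this op, so the preceding block is effectively dead code.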
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      ::std::optional<at::Tensor> bias_;
      if (at::functionalization::impl::isFunctionalTensor(bias)) {
        at::functionalization::impl::sync(bias);
        bias_ = at::functionalization::impl::from_functional_tensor(bias);
      } else {
        bias_ = bias;
      }
      
      ::std::optional<at::Tensor> running_mean_;
      if (at::functionalization::impl::isFunctionalTensor(running_mean)) {
        at::functionalization::impl::sync(running_mean);
        running_mean_ = at::functionalization::impl::from_functional_tensor(running_mean);
      } else {
        running_mean_ = running_mean;
      }
      
      ::std::optional<at::Tensor> running_var_;
      if (at::functionalization::impl::isFunctionalTensor(running_var)) {
        at::functionalization::impl::sync(running_var);
        running_var_ = at::functionalization::impl::from_functional_tensor(running_var);
      } else {
        running_var_ = running_var;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      
      at::Tensor out2_;
      if (at::functionalization::impl::isFunctionalTensor(out2)) {
        at::functionalization::impl::sync(out2);
        out2_ = at::functionalization::impl::from_functional_tensor(out2);
      } else {
        out2_ = out2;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias) || at::functionalization::impl::isFunctionalTensor(running_mean) || at::functionalization::impl::isFunctionalTensor(running_var))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::miopen_batch_norm_out::call(input_, weight_, bias_, running_mean_, running_var_, training, exponential_average_factor, epsilon, out0_, out1_, out2_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::miopen_batch_norm::call(input_, weight_, bias_, running_mean_, running_var_, training, exponential_average_factor, epsilon);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        auto out2_inner = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(out2);
        at::functionalization::impl::sync(out2);
        auto out2_inner_updated = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::propagate_xla_data_direct(out2_inner, out2_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> miopen_batch_norm_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::optional<at::Tensor> & save_var, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto grad_output_meta = to_meta(grad_output);
        auto weight_meta = to_meta(weight);
        auto running_mean_meta = to_meta(running_mean);
        auto running_var_meta = to_meta(running_var);
        auto save_mean_meta = to_meta(save_mean);
        auto save_var_meta = to_meta(save_var);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        auto out2_meta = to_meta(out2);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::miopen_batch_norm_backward_out::call(input_meta, grad_output_meta, weight_meta, running_mean_meta, running_var_meta, save_mean_meta, save_var_meta, epsilon, out0_meta, out1_meta, out2_meta);
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      ::std::optional<at::Tensor> running_mean_;
      if (at::functionalization::impl::isFunctionalTensor(running_mean)) {
        at::functionalization::impl::sync(running_mean);
        running_mean_ = at::functionalization::impl::from_functional_tensor(running_mean);
      } else {
        running_mean_ = running_mean;
      }
      
      ::std::optional<at::Tensor> running_var_;
      if (at::functionalization::impl::isFunctionalTensor(running_var)) {
        at::functionalization::impl::sync(running_var);
        running_var_ = at::functionalization::impl::from_functional_tensor(running_var);
      } else {
        running_var_ = running_var;
      }
      
      ::std::optional<at::Tensor> save_mean_;
      if (at::functionalization::impl::isFunctionalTensor(save_mean)) {
        at::functionalization::impl::sync(save_mean);
        save_mean_ = at::functionalization::impl::from_functional_tensor(save_mean);
      } else {
        save_mean_ = save_mean;
      }
      
      ::std::optional<at::Tensor> save_var_;
      if (at::functionalization::impl::isFunctionalTensor(save_var)) {
        at::functionalization::impl::sync(save_var);
        save_var_ = at::functionalization::impl::from_functional_tensor(save_var);
      } else {
        save_var_ = save_var;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      
      at::Tensor out2_;
      if (at::functionalization::impl::isFunctionalTensor(out2)) {
        at::functionalization::impl::sync(out2);
        out2_ = at::functionalization::impl::from_functional_tensor(out2);
      } else {
        out2_ = out2;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA || grad_output.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(running_mean) || at::functionalization::impl::isFunctionalTensor(running_var) || at::functionalization::impl::isFunctionalTensor(save_mean) || at::functionalization::impl::isFunctionalTensor(save_var))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::miopen_batch_norm_backward_out::call(input_, grad_output_, weight_, running_mean_, running_var_, save_mean_, save_var_, epsilon, out0_, out1_, out2_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::miopen_batch_norm_backward::call(input_, grad_output_, weight_, running_mean_, running_var_, save_mean_, save_var_, epsilon);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        auto out2_inner = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(out2);
        at::functionalization::impl::sync(out2);
        auto out2_inner_updated = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::propagate_xla_data_direct(out2_inner, out2_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
      }
    }

    at::Tensor & miopen_convolution_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto weight_meta = to_meta(weight);
        auto bias_meta = to_meta(bias);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::miopen_convolution_out::call(self_meta, weight_meta, bias_meta, padding, stride, dilation, groups, benchmark, deterministic, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      ::std::optional<at::Tensor> bias_;
      if (at::functionalization::impl::isFunctionalTensor(bias)) {
        at::functionalization::impl::sync(bias);
        bias_ = at::functionalization::impl::from_functional_tensor(bias);
      } else {
        bias_ = bias;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
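      // Non-tensor arguments (padding, stride, dilation, groups and the bool
      // flags) need no unwrapping and are forwarded to the redispatched call as-is.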
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::miopen_convolution_out::call(self_, weight_, bias_, padding, stride, dilation, groups, benchmark, deterministic, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::miopen_convolution::call(self_, weight_, bias_, padding, stride, dilation, groups, benchmark, deterministic);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & miopen_convolution_transpose_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto weight_meta = to_meta(weight);
        auto bias_meta = to_meta(bias);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::miopen_convolution_transpose_out::call(self_meta, weight_meta, bias_meta, padding, output_padding, stride, dilation, groups, benchmark, deterministic, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      ::std::optional<at::Tensor> bias_;
      if (at::functionalization::impl::isFunctionalTensor(bias)) {
        at::functionalization::impl::sync(bias);
        bias_ = at::functionalization::impl::from_functional_tensor(bias);
      } else {
        bias_ = bias;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::miopen_convolution_transpose_out::call(self_, weight_, bias_, padding, output_padding, stride, dilation, groups, benchmark, deterministic, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::miopen_convolution_transpose::call(self_, weight_, bias_, padding, output_padding, stride, dilation, groups, benchmark, deterministic);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & miopen_depthwise_convolution_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto weight_meta = to_meta(weight);
        auto bias_meta = to_meta(bias);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::miopen_depthwise_convolution_out::call(self_meta, weight_meta, bias_meta, padding, stride, dilation, groups, benchmark, deterministic, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      ::std::optional<at::Tensor> bias_;
      if (at::functionalization::impl::isFunctionalTensor(bias)) {
        at::functionalization::impl::sync(bias);
        bias_ = at::functionalization::impl::from_functional_tensor(bias);
      } else {
        bias_ = bias;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::miopen_depthwise_convolution_out::call(self_, weight_, bias_, padding, stride, dilation, groups, benchmark, deterministic, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::miopen_depthwise_convolution::call(self_, weight_, bias_, padding, stride, dilation, groups, benchmark, deterministic);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> miopen_rnn_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & hx, const ::std::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const ::std::optional<at::Tensor> & dropout_state, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto weight_meta = to_meta(weight);
        auto hx_meta = to_meta(hx);
        auto cx_meta = to_meta(cx);
        auto dropout_state_meta = to_meta(dropout_state);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        auto out2_meta = to_meta(out2);
        auto out3_meta = to_meta(out3);
        auto out4_meta = to_meta(out4);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::miopen_rnn_out::call(input_meta, weight_meta, weight_stride0, hx_meta, cx_meta, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_meta, out0_meta, out1_meta, out2_meta, out3_meta, out4_meta);
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      ::std::vector<at::Tensor> weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight.vec();
      }
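      // TensorList arguments are unwrapped into a ::std::vector<at::Tensor>:
      // as a whole list when functional, otherwise copied via .vec().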
      
      at::Tensor hx_;
      if (at::functionalization::impl::isFunctionalTensor(hx)) {
        at::functionalization::impl::sync(hx);
        hx_ = at::functionalization::impl::from_functional_tensor(hx);
      } else {
        hx_ = hx;
      }
      
      ::std::optional<at::Tensor> cx_;
      if (at::functionalization::impl::isFunctionalTensor(cx)) {
        at::functionalization::impl::sync(cx);
        cx_ = at::functionalization::impl::from_functional_tensor(cx);
      } else {
        cx_ = cx;
      }
      
      ::std::optional<at::Tensor> dropout_state_;
      if (at::functionalization::impl::isFunctionalTensor(dropout_state)) {
        at::functionalization::impl::sync(dropout_state);
        dropout_state_ = at::functionalization::impl::from_functional_tensor(dropout_state);
      } else {
        dropout_state_ = dropout_state;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      
      at::Tensor out2_;
      if (at::functionalization::impl::isFunctionalTensor(out2)) {
        at::functionalization::impl::sync(out2);
        out2_ = at::functionalization::impl::from_functional_tensor(out2);
      } else {
        out2_ = out2;
      }
      
      at::Tensor out3_;
      if (at::functionalization::impl::isFunctionalTensor(out3)) {
        at::functionalization::impl::sync(out3);
        out3_ = at::functionalization::impl::from_functional_tensor(out3);
      } else {
        out3_ = out3;
      }
      
      at::Tensor out4_;
      if (at::functionalization::impl::isFunctionalTensor(out4)) {
        at::functionalization::impl::sync(out4);
        out4_ = at::functionalization::impl::from_functional_tensor(out4);
      } else {
        out4_ = out4;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2) && at::functionalization::impl::isFunctionalTensor(out3) && at::functionalization::impl::isFunctionalTensor(out4))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA || hx.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(hx) || at::functionalization::impl::isFunctionalTensor(cx) || at::functionalization::impl::isFunctionalTensor(dropout_state))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::miopen_rnn_out::call(input_, weight_, weight_stride0, hx_, cx_, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_, out0_, out1_, out2_, out3_, out4_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3, out4);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::miopen_rnn::call(input_, weight_, weight_stride0, hx_, cx_, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        auto out2_inner = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(out2);
        at::functionalization::impl::sync(out2);
        auto out2_inner_updated = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::propagate_xla_data_direct(out2_inner, out2_inner_updated);
        auto out3_inner = at::functionalization::impl::from_functional_tensor(out3);
        at::functionalization::impl::replace_(out3, std::get<3>(tmp_output));
        at::functionalization::impl::commit_update(out3);
        at::functionalization::impl::sync(out3);
        auto out3_inner_updated = at::functionalization::impl::from_functional_tensor(out3);
        at::functionalization::impl::propagate_xla_data_direct(out3_inner, out3_inner_updated);
        auto out4_inner = at::functionalization::impl::from_functional_tensor(out4);
        at::functionalization::impl::replace_(out4, std::get<4>(tmp_output));
        at::functionalization::impl::commit_update(out4);
        at::functionalization::impl::sync(out4);
        auto out4_inner_updated = at::functionalization::impl::from_functional_tensor(out4);
        at::functionalization::impl::propagate_xla_data_direct(out4_inner, out4_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3, out4);
      }
    }

    void miopen_rnn_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const ::std::optional<at::Tensor> & cx, const at::Tensor & output, const ::std::optional<at::Tensor> & grad_output, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const ::std::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto weight_meta = to_meta(weight);
        auto weight_buf_meta = to_meta(weight_buf);
        auto hx_meta = to_meta(hx);
        auto cx_meta = to_meta(cx);
        auto output_meta = to_meta(output);
        auto grad_output_meta = to_meta(grad_output);
        auto grad_hy_meta = to_meta(grad_hy);
        auto grad_cy_meta = to_meta(grad_cy);
        auto dropout_state_meta = to_meta(dropout_state);
        auto reserve_meta = to_meta(reserve);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        auto out2_meta = to_meta(out2);
        auto out3_meta = to_meta(out3);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::miopen_rnn_backward_out::call(input_meta, weight_meta, weight_stride0, weight_buf_meta, hx_meta, cx_meta, output_meta, grad_output_meta, grad_hy_meta, grad_cy_meta, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_meta, reserve_meta, output_mask, out0_meta, out1_meta, out2_meta, out3_meta);
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      ::std::vector<at::Tensor> weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight.vec();
      }
      
      at::Tensor weight_buf_;
      if (at::functionalization::impl::isFunctionalTensor(weight_buf)) {
        at::functionalization::impl::sync(weight_buf);
        weight_buf_ = at::functionalization::impl::from_functional_tensor(weight_buf);
      } else {
        weight_buf_ = weight_buf;
      }
      
      at::Tensor hx_;
      if (at::functionalization::impl::isFunctionalTensor(hx)) {
        at::functionalization::impl::sync(hx);
        hx_ = at::functionalization::impl::from_functional_tensor(hx);
      } else {
        hx_ = hx;
      }
      
      ::std::optional<at::Tensor> cx_;
      if (at::functionalization::impl::isFunctionalTensor(cx)) {
        at::functionalization::impl::sync(cx);
        cx_ = at::functionalization::impl::from_functional_tensor(cx);
      } else {
        cx_ = cx;
      }
      
      at::Tensor output_;
      if (at::functionalization::impl::isFunctionalTensor(output)) {
        at::functionalization::impl::sync(output);
        output_ = at::functionalization::impl::from_functional_tensor(output);
      } else {
        output_ = output;
      }
      
      ::std::optional<at::Tensor> grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      ::std::optional<at::Tensor> grad_hy_;
      if (at::functionalization::impl::isFunctionalTensor(grad_hy)) {
        at::functionalization::impl::sync(grad_hy);
        grad_hy_ = at::functionalization::impl::from_functional_tensor(grad_hy);
      } else {
        grad_hy_ = grad_hy;
      }
      
      ::std::optional<at::Tensor> grad_cy_;
      if (at::functionalization::impl::isFunctionalTensor(grad_cy)) {
        at::functionalization::impl::sync(grad_cy);
        grad_cy_ = at::functionalization::impl::from_functional_tensor(grad_cy);
      } else {
        grad_cy_ = grad_cy;
      }
      
      ::std::optional<at::Tensor> dropout_state_;
      if (at::functionalization::impl::isFunctionalTensor(dropout_state)) {
        at::functionalization::impl::sync(dropout_state);
        dropout_state_ = at::functionalization::impl::from_functional_tensor(dropout_state);
      } else {
        dropout_state_ = dropout_state;
      }
      
      at::Tensor reserve_;
      if (at::functionalization::impl::isFunctionalTensor(reserve)) {
        at::functionalization::impl::sync(reserve);
        reserve_ = at::functionalization::impl::from_functional_tensor(reserve);
      } else {
        reserve_ = reserve;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      
      at::Tensor out2_;
      if (at::functionalization::impl::isFunctionalTensor(out2)) {
        at::functionalization::impl::sync(out2);
        out2_ = at::functionalization::impl::from_functional_tensor(out2);
      } else {
        out2_ = out2;
      }
      
      ::std::vector<at::Tensor> out3_;
      if (at::functionalization::impl::isFunctionalTensor(out3)) {
        at::functionalization::impl::sync(out3);
        out3_ = at::functionalization::impl::from_functional_tensor(out3);
      } else {
        out3_ = out3.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2) && at::functionalization::impl::isFunctionalTensor(out3))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA || weight_buf.device().type() == c10::DeviceType::XLA || hx.device().type() == c10::DeviceType::XLA || output.device().type() == c10::DeviceType::XLA || reserve.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(weight_buf) || at::functionalization::impl::isFunctionalTensor(hx) || at::functionalization::impl::isFunctionalTensor(cx) || at::functionalization::impl::isFunctionalTensor(output) || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(grad_hy) || at::functionalization::impl::isFunctionalTensor(grad_cy) || at::functionalization::impl::isFunctionalTensor(dropout_state) || at::functionalization::impl::isFunctionalTensor(reserve))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::miopen_rnn_backward_out::call(input_, weight_, weight_stride0, weight_buf_, hx_, cx_, output_, grad_output_, grad_hy_, grad_cy_, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_, reserve_, output_mask, out0_, out1_, out2_, out3_);
         
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::miopen_rnn_backward::call(input_, weight_, weight_stride0, weight_buf_, hx_, cx_, output_, grad_output_, grad_hy_, grad_cy_, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_, reserve_, output_mask);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        auto out2_inner = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(out2);
        at::functionalization::impl::sync(out2);
        auto out2_inner_updated = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::propagate_xla_data_direct(out2_inner, out2_inner_updated);
        auto out3_inner = at::functionalization::impl::from_functional_tensor(out3);
        at::functionalization::impl::replace_(out3, std::get<3>(tmp_output));
        at::functionalization::impl::commit_update(out3);
        at::functionalization::impl::sync(out3);
        auto out3_inner_updated = at::functionalization::impl::from_functional_tensor(out3);
        at::functionalization::impl::propagate_xla_data_direct(out3_inner, out3_inner_updated);
      }
    }
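    // The kernel above follows the multi-output out= functionalization pattern: each tensor
    // argument is synced and unwrapped from its FunctionalTensorWrapper; if every out argument
    // is functional, the functional variant (at::_ops::miopen_rnn_backward) is called and each
    // result is committed back into the matching out argument via replace_()/commit_update()/
    // sync(), with propagate_xla_data_direct() forwarding XLA-specific metadata from the old
    // inner tensor to the updated one. Otherwise the original out= overload is re-dispatched
    // below the Functionalize key.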

    at::Tensor & mm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto mat2_meta = to_meta(mat2);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::mm_out::call(self_meta, mat2_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor mat2_;
      if (at::functionalization::impl::isFunctionalTensor(mat2)) {
        at::functionalization::impl::sync(mat2);
        mat2_ = at::functionalization::impl::from_functional_tensor(mat2);
      } else {
        mat2_ = mat2;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || mat2.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(mat2))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::mm_out::call(self_, mat2_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::mm::call(self_, mat2_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
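    // mm_out_out above is the single-output instance of the same pattern: when `out` is a
    // functional tensor, the value is produced by the functional op (at::_ops::mm) and then
    // written back into `out`; when it is not, the original mm.out overload is re-dispatched
    // unchanged. A rough, illustrative sketch of the write-back behaviour for plain
    // (non-functional) tensors would be:
    //
    //   // illustrative only; not part of the generated registrations
    //   at::Tensor& sketch_mm_out(const at::Tensor& self, const at::Tensor& mat2, at::Tensor& out) {
    //     at::Tensor tmp = at::mm(self, mat2);  // functional variant
    //     out.copy_(tmp);                       // roughly what replace_/commit_update achieve
    //     return out;
    //   }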

    at::Tensor & _int_mm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto mat2_meta = to_meta(mat2);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_int_mm_out::call(self_meta, mat2_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor mat2_;
      if (at::functionalization::impl::isFunctionalTensor(mat2)) {
        at::functionalization::impl::sync(mat2);
        mat2_ = at::functionalization::impl::from_functional_tensor(mat2);
      } else {
        mat2_ = mat2;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || mat2.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(mat2))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_int_mm_out::call(self_, mat2_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_int_mm::call(self_, mat2_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _sparse_sparse_matmul_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_sparse_sparse_matmul_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_sparse_sparse_matmul_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_sparse_sparse_matmul::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> mode_out_values(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto values_meta = to_meta(values);
        auto indices_meta = to_meta(indices);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::mode_values::call(self_meta, dim, keepdim, values_meta, indices_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor values_;
      if (at::functionalization::impl::isFunctionalTensor(values)) {
        at::functionalization::impl::sync(values);
        values_ = at::functionalization::impl::from_functional_tensor(values);
      } else {
        values_ = values;
      }
      
      at::Tensor indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(values) && at::functionalization::impl::isFunctionalTensor(indices))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::mode_values::call(self_, dim, keepdim, values_, indices_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::mode::call(self_, dim, keepdim);
        }
        auto values_inner = at::functionalization::impl::from_functional_tensor(values);
        at::functionalization::impl::replace_(values, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(values);
        at::functionalization::impl::sync(values);
        auto values_inner_updated = at::functionalization::impl::from_functional_tensor(values);
        at::functionalization::impl::propagate_xla_data_direct(values_inner, values_inner_updated);
        auto indices_inner = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::replace_(indices, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(indices);
        at::functionalization::impl::sync(indices);
        auto indices_inner_updated = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::propagate_xla_data_direct(indices_inner, indices_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> mode_out_dimname_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto values_meta = to_meta(values);
        auto indices_meta = to_meta(indices);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::mode_dimname_out::call(self_meta, dim, keepdim, values_meta, indices_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor values_;
      if (at::functionalization::impl::isFunctionalTensor(values)) {
        at::functionalization::impl::sync(values);
        values_ = at::functionalization::impl::from_functional_tensor(values);
      } else {
        values_ = values;
      }
      
      at::Tensor indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(values) && at::functionalization::impl::isFunctionalTensor(indices))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::mode_dimname_out::call(self_, dim, keepdim, values_, indices_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::mode_dimname::call(self_, dim, keepdim);
        }
        auto values_inner = at::functionalization::impl::from_functional_tensor(values);
        at::functionalization::impl::replace_(values, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(values);
        at::functionalization::impl::sync(values);
        auto values_inner_updated = at::functionalization::impl::from_functional_tensor(values);
        at::functionalization::impl::propagate_xla_data_direct(values_inner, values_inner_updated);
        auto indices_inner = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::replace_(indices, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(indices);
        at::functionalization::impl::sync(indices);
        auto indices_inner_updated = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::propagate_xla_data_direct(indices_inner, indices_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
      }
    }

    at::Tensor & mul_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::mul_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::mul_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::mul_Tensor::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & mul__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::mul__Tensor::call(self_meta, other_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::mul__Tensor::call(self_, other_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::mul_Tensor::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
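    // mul__Tensor above shows the in-place pattern: under functionalization, self.mul_(other)
    // is computed with the functional op (at::_ops::mul_Tensor) and the result is committed
    // back into `self`. Note that the meta-reference block at the top of the kernel is compiled
    // with `true` for the in-place ops here (they all support meta tensors, per the comment in
    // that block) but with `false` for the out= kernels in this file, so only the in-place
    // variants run the extra shape-checking pass through meta tensors.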

    at::Tensor & mul_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::mul_Scalar_out::call(self_meta, other, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::mul_Scalar_out::call(self_, other, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::mul_Scalar::call(self_, other);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & mul__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::mul__Scalar::call(self_meta, other);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::mul__Scalar::call(self_, other);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::mul_Scalar::call(self_, other);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
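    // In mul__Scalar above there are no other tensor arguments, so the mutation check
    // degenerates: `if (!(false) && (false))` can never be taken, and a non-functional `self`
    // always falls through to the no-op redispatch in case 2. The generated structure is kept
    // identical across kernels even when a branch is unreachable.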

    at::Tensor & multiply_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::multiply_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::multiply_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::multiply_Tensor::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & multiply__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::multiply__Tensor::call(self_meta, other_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::multiply__Tensor::call(self_, other_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::multiply_Tensor::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & mv_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto vec_meta = to_meta(vec);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::mv_out::call(self_meta, vec_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor vec_;
      if (at::functionalization::impl::isFunctionalTensor(vec)) {
        at::functionalization::impl::sync(vec);
        vec_ = at::functionalization::impl::from_functional_tensor(vec);
      } else {
        vec_ = vec;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || vec.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(vec))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::mv_out::call(self_, vec_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::mv::call(self_, vec_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & mvlgamma_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t p, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::mvlgamma_out::call(self_meta, p, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::mvlgamma_out::call(self_, p, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::mvlgamma::call(self_, p);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & mvlgamma_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t p) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::mvlgamma_::call(self_meta, p);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::mvlgamma_::call(self_, p);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::mvlgamma::call(self_, p);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & narrow_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::narrow_copy_out::call(self_meta, dim, start, length, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::narrow_copy_out::call(self_, dim, start, length, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::narrow_copy::call(self_, dim, start, length);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_batch_norm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto weight_meta = to_meta(weight);
        auto bias_meta = to_meta(bias);
        auto running_mean_meta = to_meta(running_mean);
        auto running_var_meta = to_meta(running_var);
        auto out_meta = to_meta(out);
        auto save_mean_meta = to_meta(save_mean);
        auto save_invstd_meta = to_meta(save_invstd);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::native_batch_norm_out::call(input_meta, weight_meta, bias_meta, running_mean_meta, running_var_meta, training, momentum, eps, out_meta, save_mean_meta, save_invstd_meta);
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      ::std::optional<at::Tensor> weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      ::std::optional<at::Tensor> bias_;
      if (at::functionalization::impl::isFunctionalTensor(bias)) {
        at::functionalization::impl::sync(bias);
        bias_ = at::functionalization::impl::from_functional_tensor(bias);
      } else {
        bias_ = bias;
      }
      
      ::std::optional<at::Tensor> running_mean_;
      if (at::functionalization::impl::isFunctionalTensor(running_mean)) {
        at::functionalization::impl::sync(running_mean);
        running_mean_ = at::functionalization::impl::from_functional_tensor(running_mean);
      } else {
        running_mean_ = running_mean;
      }
      
      ::std::optional<at::Tensor> running_var_;
      if (at::functionalization::impl::isFunctionalTensor(running_var)) {
        at::functionalization::impl::sync(running_var);
        running_var_ = at::functionalization::impl::from_functional_tensor(running_var);
      } else {
        running_var_ = running_var;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      
      at::Tensor save_mean_;
      if (at::functionalization::impl::isFunctionalTensor(save_mean)) {
        at::functionalization::impl::sync(save_mean);
        save_mean_ = at::functionalization::impl::from_functional_tensor(save_mean);
      } else {
        save_mean_ = save_mean;
      }
      
      at::Tensor save_invstd_;
      if (at::functionalization::impl::isFunctionalTensor(save_invstd)) {
        at::functionalization::impl::sync(save_invstd);
        save_invstd_ = at::functionalization::impl::from_functional_tensor(save_invstd);
      } else {
        save_invstd_ = save_invstd;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out) && at::functionalization::impl::isFunctionalTensor(save_mean) && at::functionalization::impl::isFunctionalTensor(save_invstd))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias) || at::functionalization::impl::isFunctionalTensor(running_mean) || at::functionalization::impl::isFunctionalTensor(running_var))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::native_batch_norm_out::call(input_, weight_, bias_, running_mean_, running_var_, training, momentum, eps, out_, save_mean_, save_invstd_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out, save_mean, save_invstd);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::native_batch_norm::call(input_, weight_, bias_, running_mean_, running_var_, training, momentum, eps);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        auto save_mean_inner = at::functionalization::impl::from_functional_tensor(save_mean);
        at::functionalization::impl::replace_(save_mean, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(save_mean);
        at::functionalization::impl::sync(save_mean);
        auto save_mean_inner_updated = at::functionalization::impl::from_functional_tensor(save_mean);
        at::functionalization::impl::propagate_xla_data_direct(save_mean_inner, save_mean_inner_updated);
        auto save_invstd_inner = at::functionalization::impl::from_functional_tensor(save_invstd);
        at::functionalization::impl::replace_(save_invstd, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(save_invstd);
        at::functionalization::impl::sync(save_invstd);
        auto save_invstd_inner_updated = at::functionalization::impl::from_functional_tensor(save_invstd);
        at::functionalization::impl::propagate_xla_data_direct(save_invstd_inner, save_invstd_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out, save_mean, save_invstd);
      }
    }
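
    // After the functional variant runs, each mutable output above (`out`, `save_mean`,
    // `save_invstd`) is committed back into its wrapper with the same short write-back sequence.
    // A standalone sketch of that sequence for a single output (illustrative helper, not used by
    // the registrations):
    [[maybe_unused]] static void write_back_sketch(at::Tensor & out, const at::Tensor & new_value) {
      // Keep a handle on the tensor currently wrapped by `out`, then swap in the fresh result.
      auto out_inner = at::functionalization::impl::from_functional_tensor(out);
      at::functionalization::impl::replace_(out, new_value);
      // Commit the mutation and apply any pending updates on the wrapper.
      at::functionalization::impl::commit_update(out);
      at::functionalization::impl::sync(out);
      // Carry backend-specific (XLA) data from the previous inner tensor over to the updated one.
      auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
      at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
    }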

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto weight_meta = to_meta(weight);
        auto bias_meta = to_meta(bias);
        auto running_mean_meta = to_meta(running_mean);
        auto running_var_meta = to_meta(running_var);
        auto out_meta = to_meta(out);
        auto save_mean_meta = to_meta(save_mean);
        auto save_invstd_meta = to_meta(save_invstd);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_native_batch_norm_legit_out::call(input_meta, weight_meta, bias_meta, running_mean_meta, running_var_meta, training, momentum, eps, out_meta, save_mean_meta, save_invstd_meta);
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      ::std::optional<at::Tensor> weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      ::std::optional<at::Tensor> bias_;
      if (at::functionalization::impl::isFunctionalTensor(bias)) {
        at::functionalization::impl::sync(bias);
        bias_ = at::functionalization::impl::from_functional_tensor(bias);
      } else {
        bias_ = bias;
      }
      
      at::Tensor running_mean_;
      if (at::functionalization::impl::isFunctionalTensor(running_mean)) {
        at::functionalization::impl::sync(running_mean);
        running_mean_ = at::functionalization::impl::from_functional_tensor(running_mean);
      } else {
        running_mean_ = running_mean;
      }
      
      at::Tensor running_var_;
      if (at::functionalization::impl::isFunctionalTensor(running_var)) {
        at::functionalization::impl::sync(running_var);
        running_var_ = at::functionalization::impl::from_functional_tensor(running_var);
      } else {
        running_var_ = running_var;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      
      at::Tensor save_mean_;
      if (at::functionalization::impl::isFunctionalTensor(save_mean)) {
        at::functionalization::impl::sync(save_mean);
        save_mean_ = at::functionalization::impl::from_functional_tensor(save_mean);
      } else {
        save_mean_ = save_mean;
      }
      
      at::Tensor save_invstd_;
      if (at::functionalization::impl::isFunctionalTensor(save_invstd)) {
        at::functionalization::impl::sync(save_invstd);
        save_invstd_ = at::functionalization::impl::from_functional_tensor(save_invstd);
      } else {
        save_invstd_ = save_invstd;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(running_mean) && at::functionalization::impl::isFunctionalTensor(running_var) && at::functionalization::impl::isFunctionalTensor(out) && at::functionalization::impl::isFunctionalTensor(save_mean) && at::functionalization::impl::isFunctionalTensor(save_invstd))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::_native_batch_norm_legit_out::call(input_, weight_, bias_, running_mean_, running_var_, training, momentum, eps, out_, save_mean_, save_invstd_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out, save_mean, save_invstd);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_native_batch_norm_legit_functional::call(input_, weight_, bias_, running_mean_, running_var_, training, momentum, eps);
        }
        auto running_mean_inner = at::functionalization::impl::from_functional_tensor(running_mean);
        at::functionalization::impl::replace_(running_mean, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(running_mean);
        at::functionalization::impl::sync(running_mean);
        auto running_mean_inner_updated = at::functionalization::impl::from_functional_tensor(running_mean);
        at::functionalization::impl::propagate_xla_data_direct(running_mean_inner, running_mean_inner_updated);
        auto running_var_inner = at::functionalization::impl::from_functional_tensor(running_var);
        at::functionalization::impl::replace_(running_var, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(running_var);
        at::functionalization::impl::sync(running_var);
        auto running_var_inner_updated = at::functionalization::impl::from_functional_tensor(running_var);
        at::functionalization::impl::propagate_xla_data_direct(running_var_inner, running_var_inner_updated);
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        auto save_mean_inner = at::functionalization::impl::from_functional_tensor(save_mean);
        at::functionalization::impl::replace_(save_mean, std::get<3>(tmp_output));
        at::functionalization::impl::commit_update(save_mean);
        at::functionalization::impl::sync(save_mean);
        auto save_mean_inner_updated = at::functionalization::impl::from_functional_tensor(save_mean);
        at::functionalization::impl::propagate_xla_data_direct(save_mean_inner, save_mean_inner_updated);
        auto save_invstd_inner = at::functionalization::impl::from_functional_tensor(save_invstd);
        at::functionalization::impl::replace_(save_invstd, std::get<4>(tmp_output));
        at::functionalization::impl::commit_update(save_invstd);
        at::functionalization::impl::sync(save_invstd);
        auto save_invstd_inner_updated = at::functionalization::impl::from_functional_tensor(save_invstd);
        at::functionalization::impl::propagate_xla_data_direct(save_invstd_inner, save_invstd_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out, save_mean, save_invstd);
      }
    }
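
    // Note on the kernel above: `_native_batch_norm_legit.out` also mutates `running_mean` and
    // `running_var`, so when all of its mutable arguments are functional tensors the kernel
    // redispatches to `_native_batch_norm_legit_functional`, whose result tuple carries updated
    // running statistics alongside the regular outputs. Every mutated argument then goes through
    // the same write-back sequence sketched earlier.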

    ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto weight_meta = to_meta(weight);
        auto bias_meta = to_meta(bias);
        auto running_mean_meta = to_meta(running_mean);
        auto running_var_meta = to_meta(running_var);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_native_batch_norm_legit::call(input_meta, weight_meta, bias_meta, running_mean_meta, running_var_meta, training, momentum, eps);
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      ::std::optional<at::Tensor> weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      ::std::optional<at::Tensor> bias_;
      if (at::functionalization::impl::isFunctionalTensor(bias)) {
        at::functionalization::impl::sync(bias);
        bias_ = at::functionalization::impl::from_functional_tensor(bias);
      } else {
        bias_ = bias;
      }
      
      at::Tensor running_mean_;
      if (at::functionalization::impl::isFunctionalTensor(running_mean)) {
        at::functionalization::impl::sync(running_mean);
        running_mean_ = at::functionalization::impl::from_functional_tensor(running_mean);
      } else {
        running_mean_ = running_mean;
      }
      
      at::Tensor running_var_;
      if (at::functionalization::impl::isFunctionalTensor(running_var)) {
        at::functionalization::impl::sync(running_var);
        running_var_ = at::functionalization::impl::from_functional_tensor(running_var);
      } else {
        running_var_ = running_var;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(running_mean) && at::functionalization::impl::isFunctionalTensor(running_var))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::_native_batch_norm_legit::call(input_, weight_, bias_, running_mean_, running_var_, training, momentum, eps);
         return ::std::tuple<at::Tensor,at::Tensor,at::Tensor>(std::get<0>(tmp_output), std::get<1>(tmp_output), std::get<2>(tmp_output));
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_native_batch_norm_legit_functional::call(input_, weight_, bias_, running_mean_, running_var_, training, momentum, eps);
        }
        auto output_0 = at::functionalization::impl::to_functional_tensor(std::get<0>(tmp_output));
        auto output_1 = at::functionalization::impl::to_functional_tensor(std::get<1>(tmp_output));
        auto output_2 = at::functionalization::impl::to_functional_tensor(std::get<2>(tmp_output));
        auto running_mean_inner = at::functionalization::impl::from_functional_tensor(running_mean);
        at::functionalization::impl::replace_(running_mean, std::get<3>(tmp_output));
        at::functionalization::impl::commit_update(running_mean);
        at::functionalization::impl::sync(running_mean);
        auto running_mean_inner_updated = at::functionalization::impl::from_functional_tensor(running_mean);
        at::functionalization::impl::propagate_xla_data_direct(running_mean_inner, running_mean_inner_updated);
        auto running_var_inner = at::functionalization::impl::from_functional_tensor(running_var);
        at::functionalization::impl::replace_(running_var, std::get<4>(tmp_output));
        at::functionalization::impl::commit_update(running_var);
        at::functionalization::impl::sync(running_var);
        auto running_var_inner_updated = at::functionalization::impl::from_functional_tensor(running_var);
        at::functionalization::impl::propagate_xla_data_direct(running_var_inner, running_var_inner_updated);
        return ::std::tuple<at::Tensor,at::Tensor,at::Tensor>(output_0, output_1, output_2);
      }
    }
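
    // The non-out variant above differs from the out= kernels in how results leave the function:
    // the three freshly computed tensors are wrapped with to_functional_tensor() and returned to
    // the caller, while the mutated `running_mean` and `running_var` inputs are committed back in
    // place with the usual replace_/commit_update/sync/propagate_xla_data_direct sequence.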

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_no_training_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto weight_meta = to_meta(weight);
        auto bias_meta = to_meta(bias);
        auto running_mean_meta = to_meta(running_mean);
        auto running_var_meta = to_meta(running_var);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        auto out2_meta = to_meta(out2);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_native_batch_norm_legit_no_training_out::call(input_meta, weight_meta, bias_meta, running_mean_meta, running_var_meta, momentum, eps, out0_meta, out1_meta, out2_meta);
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      ::std::optional<at::Tensor> weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      ::std::optional<at::Tensor> bias_;
      if (at::functionalization::impl::isFunctionalTensor(bias)) {
        at::functionalization::impl::sync(bias);
        bias_ = at::functionalization::impl::from_functional_tensor(bias);
      } else {
        bias_ = bias;
      }
      
      at::Tensor running_mean_;
      if (at::functionalization::impl::isFunctionalTensor(running_mean)) {
        at::functionalization::impl::sync(running_mean);
        running_mean_ = at::functionalization::impl::from_functional_tensor(running_mean);
      } else {
        running_mean_ = running_mean;
      }
      
      at::Tensor running_var_;
      if (at::functionalization::impl::isFunctionalTensor(running_var)) {
        at::functionalization::impl::sync(running_var);
        running_var_ = at::functionalization::impl::from_functional_tensor(running_var);
      } else {
        running_var_ = running_var;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      
      at::Tensor out2_;
      if (at::functionalization::impl::isFunctionalTensor(out2)) {
        at::functionalization::impl::sync(out2);
        out2_ = at::functionalization::impl::from_functional_tensor(out2);
      } else {
        out2_ = out2;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA || running_mean.device().type() == c10::DeviceType::XLA || running_var.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias) || at::functionalization::impl::isFunctionalTensor(running_mean) || at::functionalization::impl::isFunctionalTensor(running_var))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::_native_batch_norm_legit_no_training_out::call(input_, weight_, bias_, running_mean_, running_var_, momentum, eps, out0_, out1_, out2_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_native_batch_norm_legit_no_training::call(input_, weight_, bias_, running_mean_, running_var_, momentum, eps);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        auto out2_inner = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(out2);
        at::functionalization::impl::sync(out2);
        auto out2_inner_updated = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::propagate_xla_data_direct(out2_inner, out2_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
      }
    }
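
    // When the mutable outputs are not functional tensors, the kernels in this file fall into the
    // "case 1 / case 2" branch seen above: mixing a functional input with non-functional outputs
    // is a hard error (except when an XLA tensor is involved, since copying into a CPU tensor
    // from an XLA tensor is legitimate), while an all-plain call simply skips functionalization
    // and redispatches to the original out= operator.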

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_out_no_stats_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto weight_meta = to_meta(weight);
        auto bias_meta = to_meta(bias);
        auto out_meta = to_meta(out);
        auto save_mean_meta = to_meta(save_mean);
        auto save_invstd_meta = to_meta(save_invstd);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_native_batch_norm_legit_no_stats_out::call(input_meta, weight_meta, bias_meta, training, momentum, eps, out_meta, save_mean_meta, save_invstd_meta);
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      ::std::optional<at::Tensor> weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      ::std::optional<at::Tensor> bias_;
      if (at::functionalization::impl::isFunctionalTensor(bias)) {
        at::functionalization::impl::sync(bias);
        bias_ = at::functionalization::impl::from_functional_tensor(bias);
      } else {
        bias_ = bias;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      
      at::Tensor save_mean_;
      if (at::functionalization::impl::isFunctionalTensor(save_mean)) {
        at::functionalization::impl::sync(save_mean);
        save_mean_ = at::functionalization::impl::from_functional_tensor(save_mean);
      } else {
        save_mean_ = save_mean;
      }
      
      at::Tensor save_invstd_;
      if (at::functionalization::impl::isFunctionalTensor(save_invstd)) {
        at::functionalization::impl::sync(save_invstd);
        save_invstd_ = at::functionalization::impl::from_functional_tensor(save_invstd);
      } else {
        save_invstd_ = save_invstd;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out) && at::functionalization::impl::isFunctionalTensor(save_mean) && at::functionalization::impl::isFunctionalTensor(save_invstd))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::_native_batch_norm_legit_no_stats_out::call(input_, weight_, bias_, training, momentum, eps, out_, save_mean_, save_invstd_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out, save_mean, save_invstd);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_native_batch_norm_legit_no_stats::call(input_, weight_, bias_, training, momentum, eps);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        auto save_mean_inner = at::functionalization::impl::from_functional_tensor(save_mean);
        at::functionalization::impl::replace_(save_mean, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(save_mean);
        at::functionalization::impl::sync(save_mean);
        auto save_mean_inner_updated = at::functionalization::impl::from_functional_tensor(save_mean);
        at::functionalization::impl::propagate_xla_data_direct(save_mean_inner, save_mean_inner_updated);
        auto save_invstd_inner = at::functionalization::impl::from_functional_tensor(save_invstd);
        at::functionalization::impl::replace_(save_invstd, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(save_invstd);
        at::functionalization::impl::sync(save_invstd);
        auto save_invstd_inner_updated = at::functionalization::impl::from_functional_tensor(save_invstd);
        at::functionalization::impl::propagate_xla_data_direct(save_invstd_inner, save_invstd_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out, save_mean, save_invstd);
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_stats_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, double eps, at::Tensor & out0, at::Tensor & out1) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::batch_norm_stats_out::call(input_meta, eps, out0_meta, out1_meta);
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::batch_norm_stats_out::call(input_, eps, out0_, out1_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::batch_norm_stats::call(input_, eps);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
      }
    }
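
    // The `if (false && !disable_meta_reference())` preamble at the top of each kernel above is
    // dead code here: the leading `false` means the meta-tensor replay never runs for these
    // operators. The comments inside that block describe its purpose when the guard is enabled,
    // namely replaying the mutable op on meta tensors so that shape errors surface before the
    // call is rewritten into its functional form.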

    at::Tensor & batch_norm_elemt_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & invstd, double eps, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto weight_meta = to_meta(weight);
        auto bias_meta = to_meta(bias);
        auto mean_meta = to_meta(mean);
        auto invstd_meta = to_meta(invstd);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::batch_norm_elemt_out::call(input_meta, weight_meta, bias_meta, mean_meta, invstd_meta, eps, out_meta);
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      ::std::optional<at::Tensor> weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      ::std::optional<at::Tensor> bias_;
      if (at::functionalization::impl::isFunctionalTensor(bias)) {
        at::functionalization::impl::sync(bias);
        bias_ = at::functionalization::impl::from_functional_tensor(bias);
      } else {
        bias_ = bias;
      }
      
      at::Tensor mean_;
      if (at::functionalization::impl::isFunctionalTensor(mean)) {
        at::functionalization::impl::sync(mean);
        mean_ = at::functionalization::impl::from_functional_tensor(mean);
      } else {
        mean_ = mean;
      }
      
      at::Tensor invstd_;
      if (at::functionalization::impl::isFunctionalTensor(invstd)) {
        at::functionalization::impl::sync(invstd);
        invstd_ = at::functionalization::impl::from_functional_tensor(invstd);
      } else {
        invstd_ = invstd;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA || mean.device().type() == c10::DeviceType::XLA || invstd.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias) || at::functionalization::impl::isFunctionalTensor(mean) || at::functionalization::impl::isFunctionalTensor(invstd))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::batch_norm_elemt_out::call(input_, weight_, bias_, mean_, invstd_, eps, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::batch_norm_elemt::call(input_, weight_, bias_, mean_, invstd_, eps);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
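
    // Both the meta replay block and every redispatch in these kernels happen under an
    // at::AutoDispatchSkipFunctionalize guard, so the nested call bypasses the Functionalize
    // dispatch key and reaches the underlying kernel rather than re-entering the wrappers in
    // this file.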

    ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_gather_stats_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, double momentum, double eps, int64_t count, at::Tensor & out0, at::Tensor & out1) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto mean_meta = to_meta(mean);
        auto invstd_meta = to_meta(invstd);
        auto running_mean_meta = to_meta(running_mean);
        auto running_var_meta = to_meta(running_var);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::batch_norm_gather_stats_out::call(input_meta, mean_meta, invstd_meta, running_mean_meta, running_var_meta, momentum, eps, count, out0_meta, out1_meta);
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      at::Tensor mean_;
      if (at::functionalization::impl::isFunctionalTensor(mean)) {
        at::functionalization::impl::sync(mean);
        mean_ = at::functionalization::impl::from_functional_tensor(mean);
      } else {
        mean_ = mean;
      }
      
      at::Tensor invstd_;
      if (at::functionalization::impl::isFunctionalTensor(invstd)) {
        at::functionalization::impl::sync(invstd);
        invstd_ = at::functionalization::impl::from_functional_tensor(invstd);
      } else {
        invstd_ = invstd;
      }
      
      ::std::optional<at::Tensor> running_mean_;
      if (at::functionalization::impl::isFunctionalTensor(running_mean)) {
        at::functionalization::impl::sync(running_mean);
        running_mean_ = at::functionalization::impl::from_functional_tensor(running_mean);
      } else {
        running_mean_ = running_mean;
      }
      
      ::std::optional<at::Tensor> running_var_;
      if (at::functionalization::impl::isFunctionalTensor(running_var)) {
        at::functionalization::impl::sync(running_var);
        running_var_ = at::functionalization::impl::from_functional_tensor(running_var);
      } else {
        running_var_ = running_var;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA || mean.device().type() == c10::DeviceType::XLA || invstd.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(mean) || at::functionalization::impl::isFunctionalTensor(invstd) || at::functionalization::impl::isFunctionalTensor(running_mean) || at::functionalization::impl::isFunctionalTensor(running_var))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::batch_norm_gather_stats_out::call(input_, mean_, invstd_, running_mean_, running_var_, momentum, eps, count, out0_, out1_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::batch_norm_gather_stats::call(input_, mean_, invstd_, running_mean_, running_var_, momentum, eps, count);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_gather_stats_with_counts_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, double momentum, double eps, const at::Tensor & counts, at::Tensor & out0, at::Tensor & out1) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto mean_meta = to_meta(mean);
        auto invstd_meta = to_meta(invstd);
        auto running_mean_meta = to_meta(running_mean);
        auto running_var_meta = to_meta(running_var);
        auto counts_meta = to_meta(counts);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::batch_norm_gather_stats_with_counts_out::call(input_meta, mean_meta, invstd_meta, running_mean_meta, running_var_meta, momentum, eps, counts_meta, out0_meta, out1_meta);
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      at::Tensor mean_;
      if (at::functionalization::impl::isFunctionalTensor(mean)) {
        at::functionalization::impl::sync(mean);
        mean_ = at::functionalization::impl::from_functional_tensor(mean);
      } else {
        mean_ = mean;
      }
      
      at::Tensor invstd_;
      if (at::functionalization::impl::isFunctionalTensor(invstd)) {
        at::functionalization::impl::sync(invstd);
        invstd_ = at::functionalization::impl::from_functional_tensor(invstd);
      } else {
        invstd_ = invstd;
      }
      
      ::std::optional<at::Tensor> running_mean_;
      if (at::functionalization::impl::isFunctionalTensor(running_mean)) {
        at::functionalization::impl::sync(running_mean);
        running_mean_ = at::functionalization::impl::from_functional_tensor(running_mean);
      } else {
        running_mean_ = running_mean;
      }
      
      ::std::optional<at::Tensor> running_var_;
      if (at::functionalization::impl::isFunctionalTensor(running_var)) {
        at::functionalization::impl::sync(running_var);
        running_var_ = at::functionalization::impl::from_functional_tensor(running_var);
      } else {
        running_var_ = running_var;
      }
      
      at::Tensor counts_;
      if (at::functionalization::impl::isFunctionalTensor(counts)) {
        at::functionalization::impl::sync(counts);
        counts_ = at::functionalization::impl::from_functional_tensor(counts);
      } else {
        counts_ = counts;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA || mean.device().type() == c10::DeviceType::XLA || invstd.device().type() == c10::DeviceType::XLA || counts.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(mean) || at::functionalization::impl::isFunctionalTensor(invstd) || at::functionalization::impl::isFunctionalTensor(running_mean) || at::functionalization::impl::isFunctionalTensor(running_var) || at::functionalization::impl::isFunctionalTensor(counts))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::batch_norm_gather_stats_with_counts_out::call(input_, mean_, invstd_, running_mean_, running_var_, momentum, eps, counts_, out0_, out1_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::batch_norm_gather_stats_with_counts::call(input_, mean_, invstd_, running_mean_, running_var_, momentum, eps, counts_);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
      }
    }
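
    // The two gather_stats kernels above differ only in how the per-replica element counts are
    // supplied: batch_norm_gather_stats takes a single integer `count`, while the _with_counts
    // variant takes a `counts` tensor. Functionalization treats `counts` as a read-only input:
    // it is unwrapped like the other arguments but never written back.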

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_batch_norm_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::optional<at::Tensor> & save_invstd, bool train, double eps, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_out_meta = to_meta(grad_out);
        auto input_meta = to_meta(input);
        auto weight_meta = to_meta(weight);
        auto running_mean_meta = to_meta(running_mean);
        auto running_var_meta = to_meta(running_var);
        auto save_mean_meta = to_meta(save_mean);
        auto save_invstd_meta = to_meta(save_invstd);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        auto out2_meta = to_meta(out2);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::native_batch_norm_backward_out::call(grad_out_meta, input_meta, weight_meta, running_mean_meta, running_var_meta, save_mean_meta, save_invstd_meta, train, eps, output_mask, out0_meta, out1_meta, out2_meta);
      }
      
      at::Tensor grad_out_;
      if (at::functionalization::impl::isFunctionalTensor(grad_out)) {
        at::functionalization::impl::sync(grad_out);
        grad_out_ = at::functionalization::impl::from_functional_tensor(grad_out);
      } else {
        grad_out_ = grad_out;
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      ::std::optional<at::Tensor> weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      ::std::optional<at::Tensor> running_mean_;
      if (at::functionalization::impl::isFunctionalTensor(running_mean)) {
        at::functionalization::impl::sync(running_mean);
        running_mean_ = at::functionalization::impl::from_functional_tensor(running_mean);
      } else {
        running_mean_ = running_mean;
      }
      
      ::std::optional<at::Tensor> running_var_;
      if (at::functionalization::impl::isFunctionalTensor(running_var)) {
        at::functionalization::impl::sync(running_var);
        running_var_ = at::functionalization::impl::from_functional_tensor(running_var);
      } else {
        running_var_ = running_var;
      }
      
      ::std::optional<at::Tensor> save_mean_;
      if (at::functionalization::impl::isFunctionalTensor(save_mean)) {
        at::functionalization::impl::sync(save_mean);
        save_mean_ = at::functionalization::impl::from_functional_tensor(save_mean);
      } else {
        save_mean_ = save_mean;
      }
      
      ::std::optional<at::Tensor> save_invstd_;
      if (at::functionalization::impl::isFunctionalTensor(save_invstd)) {
        at::functionalization::impl::sync(save_invstd);
        save_invstd_ = at::functionalization::impl::from_functional_tensor(save_invstd);
      } else {
        save_invstd_ = save_invstd;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      
      at::Tensor out2_;
      if (at::functionalization::impl::isFunctionalTensor(out2)) {
        at::functionalization::impl::sync(out2);
        out2_ = at::functionalization::impl::from_functional_tensor(out2);
      } else {
        out2_ = out2;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_out.device().type() == c10::DeviceType::XLA || input.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_out) || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(running_mean) || at::functionalization::impl::isFunctionalTensor(running_var) || at::functionalization::impl::isFunctionalTensor(save_mean) || at::functionalization::impl::isFunctionalTensor(save_invstd))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::native_batch_norm_backward_out::call(grad_out_, input_, weight_, running_mean_, running_var_, save_mean_, save_invstd_, train, eps, output_mask, out0_, out1_, out2_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::native_batch_norm_backward::call(grad_out_, input_, weight_, running_mean_, running_var_, save_mean_, save_invstd_, train, eps, output_mask);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        auto out2_inner = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(out2);
        at::functionalization::impl::sync(out2);
        auto out2_inner_updated = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::propagate_xla_data_direct(out2_inner, out2_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
      }
    }
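
    // [Editor's note, illustrative sketch -- not emitted by torchgen] Each of the
    // out= kernels in this file repeats the same per-argument step before
    // redispatching: if an argument is a FunctionalTensorWrapper, flush its pending
    // updates and unwrap it; otherwise pass it through unchanged. A minimal
    // stand-alone version of that step is sketched below; the helper name
    // `unwrap_functional_arg_sketch` is hypothetical and not used by the
    // generated kernels.
    namespace {
    [[maybe_unused]] at::Tensor unwrap_functional_arg_sketch(const at::Tensor & t) {
      if (at::functionalization::impl::isFunctionalTensor(t)) {
        // Apply any queued-up mutations/view updates first, then return the inner tensor.
        at::functionalization::impl::sync(t);
        return at::functionalization::impl::from_functional_tensor(t);
      }
      // Non-functional tensors pass through unchanged.
      return t;
    }
    } // namespace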

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> batch_norm_backward_reduce_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const ::std::optional<at::Tensor> & weight, bool input_g, bool weight_g, bool bias_g, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_out_meta = to_meta(grad_out);
        auto input_meta = to_meta(input);
        auto mean_meta = to_meta(mean);
        auto invstd_meta = to_meta(invstd);
        auto weight_meta = to_meta(weight);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        auto out2_meta = to_meta(out2);
        auto out3_meta = to_meta(out3);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::batch_norm_backward_reduce_out::call(grad_out_meta, input_meta, mean_meta, invstd_meta, weight_meta, input_g, weight_g, bias_g, out0_meta, out1_meta, out2_meta, out3_meta);
      }
      
      at::Tensor grad_out_;
      if (at::functionalization::impl::isFunctionalTensor(grad_out)) {
        at::functionalization::impl::sync(grad_out);
        grad_out_ = at::functionalization::impl::from_functional_tensor(grad_out);
      } else {
        grad_out_ = grad_out;
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      at::Tensor mean_;
      if (at::functionalization::impl::isFunctionalTensor(mean)) {
        at::functionalization::impl::sync(mean);
        mean_ = at::functionalization::impl::from_functional_tensor(mean);
      } else {
        mean_ = mean;
      }
      
      at::Tensor invstd_;
      if (at::functionalization::impl::isFunctionalTensor(invstd)) {
        at::functionalization::impl::sync(invstd);
        invstd_ = at::functionalization::impl::from_functional_tensor(invstd);
      } else {
        invstd_ = invstd;
      }
      
      ::std::optional<at::Tensor> weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      
      at::Tensor out2_;
      if (at::functionalization::impl::isFunctionalTensor(out2)) {
        at::functionalization::impl::sync(out2);
        out2_ = at::functionalization::impl::from_functional_tensor(out2);
      } else {
        out2_ = out2;
      }
      
      at::Tensor out3_;
      if (at::functionalization::impl::isFunctionalTensor(out3)) {
        at::functionalization::impl::sync(out3);
        out3_ = at::functionalization::impl::from_functional_tensor(out3);
      } else {
        out3_ = out3;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2) && at::functionalization::impl::isFunctionalTensor(out3))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_out.device().type() == c10::DeviceType::XLA || input.device().type() == c10::DeviceType::XLA || mean.device().type() == c10::DeviceType::XLA || invstd.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_out) || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(mean) || at::functionalization::impl::isFunctionalTensor(invstd) || at::functionalization::impl::isFunctionalTensor(weight))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::batch_norm_backward_reduce_out::call(grad_out_, input_, mean_, invstd_, weight_, input_g, weight_g, bias_g, out0_, out1_, out2_, out3_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::batch_norm_backward_reduce::call(grad_out_, input_, mean_, invstd_, weight_, input_g, weight_g, bias_g);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        auto out2_inner = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(out2);
        at::functionalization::impl::sync(out2);
        auto out2_inner_updated = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::propagate_xla_data_direct(out2_inner, out2_inner_updated);
        auto out3_inner = at::functionalization::impl::from_functional_tensor(out3);
        at::functionalization::impl::replace_(out3, std::get<3>(tmp_output));
        at::functionalization::impl::commit_update(out3);
        at::functionalization::impl::sync(out3);
        auto out3_inner_updated = at::functionalization::impl::from_functional_tensor(out3);
        at::functionalization::impl::propagate_xla_data_direct(out3_inner, out3_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3);
      }
    }

    at::Tensor & batch_norm_backward_elemt_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const ::std::optional<at::Tensor> & weight, const at::Tensor & sum_dy, const at::Tensor & sum_dy_xmu, const at::Tensor & count, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_out_meta = to_meta(grad_out);
        auto input_meta = to_meta(input);
        auto mean_meta = to_meta(mean);
        auto invstd_meta = to_meta(invstd);
        auto weight_meta = to_meta(weight);
        auto sum_dy_meta = to_meta(sum_dy);
        auto sum_dy_xmu_meta = to_meta(sum_dy_xmu);
        auto count_meta = to_meta(count);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::batch_norm_backward_elemt_out::call(grad_out_meta, input_meta, mean_meta, invstd_meta, weight_meta, sum_dy_meta, sum_dy_xmu_meta, count_meta, out_meta);
      }
      
      at::Tensor grad_out_;
      if (at::functionalization::impl::isFunctionalTensor(grad_out)) {
        at::functionalization::impl::sync(grad_out);
        grad_out_ = at::functionalization::impl::from_functional_tensor(grad_out);
      } else {
        grad_out_ = grad_out;
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      at::Tensor mean_;
      if (at::functionalization::impl::isFunctionalTensor(mean)) {
        at::functionalization::impl::sync(mean);
        mean_ = at::functionalization::impl::from_functional_tensor(mean);
      } else {
        mean_ = mean;
      }
      
      at::Tensor invstd_;
      if (at::functionalization::impl::isFunctionalTensor(invstd)) {
        at::functionalization::impl::sync(invstd);
        invstd_ = at::functionalization::impl::from_functional_tensor(invstd);
      } else {
        invstd_ = invstd;
      }
      
      ::std::optional<at::Tensor> weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      at::Tensor sum_dy_;
      if (at::functionalization::impl::isFunctionalTensor(sum_dy)) {
        at::functionalization::impl::sync(sum_dy);
        sum_dy_ = at::functionalization::impl::from_functional_tensor(sum_dy);
      } else {
        sum_dy_ = sum_dy;
      }
      
      at::Tensor sum_dy_xmu_;
      if (at::functionalization::impl::isFunctionalTensor(sum_dy_xmu)) {
        at::functionalization::impl::sync(sum_dy_xmu);
        sum_dy_xmu_ = at::functionalization::impl::from_functional_tensor(sum_dy_xmu);
      } else {
        sum_dy_xmu_ = sum_dy_xmu;
      }
      
      at::Tensor count_;
      if (at::functionalization::impl::isFunctionalTensor(count)) {
        at::functionalization::impl::sync(count);
        count_ = at::functionalization::impl::from_functional_tensor(count);
      } else {
        count_ = count;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_out.device().type() == c10::DeviceType::XLA || input.device().type() == c10::DeviceType::XLA || mean.device().type() == c10::DeviceType::XLA || invstd.device().type() == c10::DeviceType::XLA || sum_dy.device().type() == c10::DeviceType::XLA || sum_dy_xmu.device().type() == c10::DeviceType::XLA || count.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_out) || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(mean) || at::functionalization::impl::isFunctionalTensor(invstd) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(sum_dy) || at::functionalization::impl::isFunctionalTensor(sum_dy_xmu) || at::functionalization::impl::isFunctionalTensor(count))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::batch_norm_backward_elemt_out::call(grad_out_, input_, mean_, invstd_, weight_, sum_dy_, sum_dy_xmu_, count_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::batch_norm_backward_elemt::call(grad_out_, input_, mean_, invstd_, weight_, sum_dy_, sum_dy_xmu_, count_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
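
    // [Editor's note, illustrative sketch -- not emitted by torchgen] When the out
    // tensor is functional, the kernels above call the functional variant of the op
    // and then write the result back into the caller's out tensor: replace_() swaps
    // in the new value, commit_update() and sync() propagate it through any views,
    // and propagate_xla_data_direct() forwards XLA-side data from the old inner
    // tensor to the new one. A minimal version of that write-back, under the
    // hypothetical helper name `write_back_sketch`:
    namespace {
    [[maybe_unused]] void write_back_sketch(const at::Tensor & out, const at::Tensor & result) {
      // Capture the inner tensor before the update so XLA data can be forwarded afterwards.
      auto out_inner = at::functionalization::impl::from_functional_tensor(out);
      at::functionalization::impl::replace_(out, result);
      at::functionalization::impl::commit_update(out);
      at::functionalization::impl::sync(out);
      auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
      at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
    }
    } // namespace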

    ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_update_stats_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, double momentum, at::Tensor & out0, at::Tensor & out1) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto running_mean_meta = to_meta(running_mean);
        auto running_var_meta = to_meta(running_var);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::batch_norm_update_stats_out::call(input_meta, running_mean_meta, running_var_meta, momentum, out0_meta, out1_meta);
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      ::std::optional<at::Tensor> running_mean_;
      if (at::functionalization::impl::isFunctionalTensor(running_mean)) {
        at::functionalization::impl::sync(running_mean);
        running_mean_ = at::functionalization::impl::from_functional_tensor(running_mean);
      } else {
        running_mean_ = running_mean;
      }
      
      ::std::optional<at::Tensor> running_var_;
      if (at::functionalization::impl::isFunctionalTensor(running_var)) {
        at::functionalization::impl::sync(running_var);
        running_var_ = at::functionalization::impl::from_functional_tensor(running_var);
      } else {
        running_var_ = running_var;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(running_mean) || at::functionalization::impl::isFunctionalTensor(running_var))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::batch_norm_update_stats_out::call(input_, running_mean_, running_var_, momentum, out0_, out1_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::batch_norm_update_stats::call(input_, running_mean_, running_var_, momentum);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
      }
    }

    at::Tensor & _nnpack_spatial_convolution_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto weight_meta = to_meta(weight);
        auto bias_meta = to_meta(bias);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_nnpack_spatial_convolution_out::call(input_meta, weight_meta, bias_meta, padding, stride, out_meta);
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      ::std::optional<at::Tensor> bias_;
      if (at::functionalization::impl::isFunctionalTensor(bias)) {
        at::functionalization::impl::sync(bias);
        bias_ = at::functionalization::impl::from_functional_tensor(bias);
      } else {
        bias_ = bias;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_nnpack_spatial_convolution_out::call(input_, weight_, bias_, padding, stride, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_nnpack_spatial_convolution::call(input_, weight_, bias_, padding, stride);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & ones_out_names_out(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, ::std::optional<at::DimnameList> names, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::ones_names_out::call(size, names, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::ones_names_out::call(size, names, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::ones_names::call(size, names, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & ones_out_out(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::ones_out::call(size, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::ones_out::call(size, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::ones::call(size, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
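
    // [Editor's note, illustrative -- not emitted by torchgen] For factory-style
    // out= ops such as ones.out there are no tensor inputs, so the "case 1" guard
    // above reduces to `!(false) && (false)` and can never fire, and the functional
    // call borrows dtype/layout/device from the out tensor itself (pin_memory is
    // left as nullopt). A hedged sketch of that metadata forwarding, with the
    // hypothetical helper name `ones_functional_sketch`:
    namespace {
    [[maybe_unused]] at::Tensor ones_functional_sketch(c10::SymIntArrayRef size, const at::Tensor & out_) {
      // Re-dispatch to the functional `ones` overload, reusing the target out tensor's metadata.
      return at::_ops::ones::call(size, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
    }
    } // namespace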

    at::Tensor & ones_like_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::ones_like_out::call(self_meta, memory_format, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::ones_like_out::call(self_, memory_format, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::ones_like::call(self_, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt, memory_format);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _euclidean_dist_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x1, const at::Tensor & x2, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto x1_meta = to_meta(x1);
        auto x2_meta = to_meta(x2);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_euclidean_dist_out::call(x1_meta, x2_meta, out_meta);
      }
      
      at::Tensor x1_;
      if (at::functionalization::impl::isFunctionalTensor(x1)) {
        at::functionalization::impl::sync(x1);
        x1_ = at::functionalization::impl::from_functional_tensor(x1);
      } else {
        x1_ = x1;
      }
      
      at::Tensor x2_;
      if (at::functionalization::impl::isFunctionalTensor(x2)) {
        at::functionalization::impl::sync(x2);
        x2_ = at::functionalization::impl::from_functional_tensor(x2);
      } else {
        x2_ = x2;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || x1.device().type() == c10::DeviceType::XLA || x2.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(x1) || at::functionalization::impl::isFunctionalTensor(x2))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_euclidean_dist_out::call(x1_, x2_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_euclidean_dist::call(x1_, x2_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _cdist_forward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x1, const at::Tensor & x2, double p, ::std::optional<int64_t> compute_mode, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto x1_meta = to_meta(x1);
        auto x2_meta = to_meta(x2);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_cdist_forward_out::call(x1_meta, x2_meta, p, compute_mode, out_meta);
      }
      
      at::Tensor x1_;
      if (at::functionalization::impl::isFunctionalTensor(x1)) {
        at::functionalization::impl::sync(x1);
        x1_ = at::functionalization::impl::from_functional_tensor(x1);
      } else {
        x1_ = x1;
      }
      
      at::Tensor x2_;
      if (at::functionalization::impl::isFunctionalTensor(x2)) {
        at::functionalization::impl::sync(x2);
        x2_ = at::functionalization::impl::from_functional_tensor(x2);
      } else {
        x2_ = x2;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || x1.device().type() == c10::DeviceType::XLA || x2.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(x1) || at::functionalization::impl::isFunctionalTensor(x2))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_cdist_forward_out::call(x1_, x2_, p, compute_mode, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_cdist_forward::call(x1_, x2_, p, compute_mode);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _cdist_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_meta = to_meta(grad);
        auto x1_meta = to_meta(x1);
        auto x2_meta = to_meta(x2);
        auto cdist_meta = to_meta(cdist);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_cdist_backward_out::call(grad_meta, x1_meta, x2_meta, p, cdist_meta, out_meta);
      }
      
      at::Tensor grad_;
      if (at::functionalization::impl::isFunctionalTensor(grad)) {
        at::functionalization::impl::sync(grad);
        grad_ = at::functionalization::impl::from_functional_tensor(grad);
      } else {
        grad_ = grad;
      }
      
      at::Tensor x1_;
      if (at::functionalization::impl::isFunctionalTensor(x1)) {
        at::functionalization::impl::sync(x1);
        x1_ = at::functionalization::impl::from_functional_tensor(x1);
      } else {
        x1_ = x1;
      }
      
      at::Tensor x2_;
      if (at::functionalization::impl::isFunctionalTensor(x2)) {
        at::functionalization::impl::sync(x2);
        x2_ = at::functionalization::impl::from_functional_tensor(x2);
      } else {
        x2_ = x2;
      }
      
      at::Tensor cdist_;
      if (at::functionalization::impl::isFunctionalTensor(cdist)) {
        at::functionalization::impl::sync(cdist);
        cdist_ = at::functionalization::impl::from_functional_tensor(cdist);
      } else {
        cdist_ = cdist;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad.device().type() == c10::DeviceType::XLA || x1.device().type() == c10::DeviceType::XLA || x2.device().type() == c10::DeviceType::XLA || cdist.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad) || at::functionalization::impl::isFunctionalTensor(x1) || at::functionalization::impl::isFunctionalTensor(x2) || at::functionalization::impl::isFunctionalTensor(cdist))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_cdist_backward_out::call(grad_, x1_, x2_, p, cdist_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_cdist_backward::call(grad_, x1_, x2_, p, cdist_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _pdist_forward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_pdist_forward_out::call(self_meta, p, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_pdist_forward_out::call(self_, p, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_pdist_forward::call(self_, p);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _pdist_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, double p, const at::Tensor & pdist, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_meta = to_meta(grad);
        auto self_meta = to_meta(self);
        auto pdist_meta = to_meta(pdist);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_pdist_backward_out::call(grad_meta, self_meta, p, pdist_meta, out_meta);
      }
      
      at::Tensor grad_;
      if (at::functionalization::impl::isFunctionalTensor(grad)) {
        at::functionalization::impl::sync(grad);
        grad_ = at::functionalization::impl::from_functional_tensor(grad);
      } else {
        grad_ = grad;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor pdist_;
      if (at::functionalization::impl::isFunctionalTensor(pdist)) {
        at::functionalization::impl::sync(pdist);
        pdist_ = at::functionalization::impl::from_functional_tensor(pdist);
      } else {
        pdist_ = pdist;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA || pdist.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(pdist))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_pdist_backward_out::call(grad_, self_, p, pdist_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_pdist_backward::call(grad_, self_, p, pdist_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & pixel_shuffle_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t upscale_factor, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::pixel_shuffle_out::call(self_meta, upscale_factor, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::pixel_shuffle_out::call(self_, upscale_factor, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::pixel_shuffle::call(self_, upscale_factor);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & pixel_unshuffle_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t downscale_factor, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::pixel_unshuffle_out::call(self_meta, downscale_factor, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::pixel_unshuffle_out::call(self_, downscale_factor, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::pixel_unshuffle::call(self_, downscale_factor);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & channel_shuffle_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt groups, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::channel_shuffle_out::call(self_meta, groups, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::channel_shuffle_out::call(self_, groups, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::channel_shuffle::call(self_, groups);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _pin_memory_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::Device> device, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_pin_memory_out::call(self_meta, device, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_pin_memory_out::call(self_, device, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_pin_memory::call(self_, device);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & rad2deg_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::rad2deg_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::rad2deg_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::rad2deg::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & rad2deg_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::rad2deg_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::rad2deg_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::rad2deg::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
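
    // For in-place kernels such as rad2deg_ above, the meta-reference pre-flight is enabled
    // (the guard reads `if (true && !disable_meta_reference())`) because all in-place ops
    // support meta tensors; the out= kernels in this section keep it disabled (`false && ...`).
    // Illustrative sketch (not part of the generated kernels), with shapes assumed only for
    // the example:
    //
    //   at::Tensor t = at::rand({3});
    //   auto f_t = at::functionalization::impl::to_functional_tensor(t);
    //   c10::impl::IncludeDispatchKeyGuard guard(c10::DispatchKey::Functionalize);
    //   f_t.rad2deg_();  // lands in rad2deg_ above: the functional at::_ops::rad2deg runs
    //                    // and its result is committed into f_t's wrapper, so no real
    //                    // in-place mutation of the underlying storage occurs.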

    at::Tensor & deg2rad_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::deg2rad_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::deg2rad_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::deg2rad::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & deg2rad_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::deg2rad_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::deg2rad_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::deg2rad::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & scalar_tensor_out_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & s, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::scalar_tensor_out::call(s, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::scalar_tensor_out::call(s, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::scalar_tensor::call(s, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
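
    // Factory-style out= kernels such as scalar_tensor_out_out above take no tensor inputs
    // besides `out`, so the functional variant is invoked with the dtype, layout, and device
    // read from out_ (and pin_memory left as nullopt) before the result is committed back
    // into out's wrapper. Illustrative sketch (not part of the generated kernels); the
    // Scalar value is an assumption for the example:
    //
    //   at::Tensor out = at::empty({}, at::kFloat);
    //   auto f_out = at::functionalization::impl::to_functional_tensor(out);
    //   c10::impl::IncludeDispatchKeyGuard guard(c10::DispatchKey::Functionalize);
    //   at::_ops::scalar_tensor_out::call(3.14, f_out);
    //   // The kernel forwards to at::_ops::scalar_tensor::call(3.14, out's dtype/layout/
    //   // device, nullopt) and commits the fresh tensor into f_out.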

    at::Tensor & rand_out_names_out(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::rand_names_out::call(size, names, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::rand_names_out::call(size, names, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::rand_names::call(size, names, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & rand_out_generator_with_names_out(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::rand_generator_with_names_out::call(size, generator, names, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::rand_generator_with_names_out::call(size, generator, names, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::rand_generator_with_names::call(size, generator, names, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & rand_out_out(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::rand_out::call(size, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::rand_out::call(size, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::rand::call(size, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & rand_out_generator_out(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::rand_generator_out::call(size, generator, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::rand_generator_out::call(size, generator, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::rand_generator::call(size, generator, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
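
    // In the generator-taking overloads (rand_out_generator_out above and the randint/randn
    // variants below), the optional at::Generator is forwarded unchanged to the functional
    // op while dtype, layout, and device are still derived from out_, so the functional
    // variant draws from the same RNG stream as the original out= op would have.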

    at::Tensor & rand_like_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::rand_like_out::call(self_meta, memory_format, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::rand_like_out::call(self_, memory_format, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::rand_like::call(self_, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt, memory_format);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
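
    // For *_like out= kernels such as rand_like_out_out above, `self` supplies the shape
    // for the functional call while dtype, layout, and device are taken from out_, with
    // memory_format forwarded as given; and because a real tensor input is present, the
    // functional-vs-non-functional mismatch check on `self` (case 1 above) also applies.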

    at::Tensor & randint_out_out(c10::DispatchKeySet dispatchKeySet, c10::SymInt high, c10::SymIntArrayRef size, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::randint_out::call(high, size, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::randint_out::call(high, size, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::randint::call(high, size, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & randint_out_generator_out(c10::DispatchKeySet dispatchKeySet, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::randint_generator_out::call(high, size, generator, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::randint_generator_out::call(high, size, generator, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::randint_generator::call(high, size, generator, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & randint_out_low_out(c10::DispatchKeySet dispatchKeySet, c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::randint_low_out::call(low, high, size, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::randint_low_out::call(low, high, size, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::randint_low::call(low, high, size, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & randint_out_low_generator_out(c10::DispatchKeySet dispatchKeySet, c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::randint_low_generator_out::call(low, high, size, generator, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::randint_low_generator_out::call(low, high, size, generator, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::randint_low_generator::call(low, high, size, generator, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & randint_like_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt high, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::randint_like_out::call(self_meta, high, memory_format, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::randint_like_out::call(self_, high, memory_format, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::randint_like::call(self_, high, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt, memory_format);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & randint_like_out_low_dtype_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt low, c10::SymInt high, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::randint_like_low_dtype_out::call(self_meta, low, high, memory_format, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::randint_like_low_dtype_out::call(self_, low, high, memory_format, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::randint_like_low_dtype::call(self_, low, high, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt, memory_format);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & randn_out_out(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::randn_out::call(size, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::randn_out::call(size, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::randn::call(size, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & randn_out_generator_out(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::randn_generator_out::call(size, generator, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::randn_generator_out::call(size, generator, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::randn_generator::call(size, generator, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & randn_out_names_out(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::randn_names_out::call(size, names, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::randn_names_out::call(size, names, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::randn_names::call(size, names, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & randn_out_generator_with_names_out(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::randn_generator_with_names_out::call(size, generator, names, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::randn_generator_with_names_out::call(size, generator, names, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::randn_generator_with_names::call(size, generator, names, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & randn_like_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::randn_like_out::call(self_meta, memory_format, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
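        // Here the error can only fire when a functional `self` would be written into
        // a non-functional `out`, and the check is skipped entirely when `self` is an
        // XLA tensor, since mixing XLA and non-functional tensors is legitimate.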
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::randn_like_out::call(self_, memory_format, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::randn_like::call(self_, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt, memory_format);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & randperm_out_out(c10::DispatchKeySet dispatchKeySet, c10::SymInt n, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::randperm_out::call(n, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::randperm_out::call(n, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
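          // The dtype/layout/device arguments missing from the out= overload are
          // taken from the existing `out` tensor; pin_memory is left as nullopt.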
          tmp_output = at::_ops::randperm::call(n, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & randperm_out_generator_out(c10::DispatchKeySet dispatchKeySet, c10::SymInt n, ::std::optional<at::Generator> generator, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::randperm_generator_out::call(n, generator, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::randperm_generator_out::call(n, generator, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::randperm_generator::call(n, generator, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & range_out_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::range_out::call(start, end, step, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::range_out::call(start, end, step, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::range_step::call(start, end, step, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

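    // The two range out= kernels differ only in their functional targets: the
    // (start, end, step) overload above redispatches to _ops::range_step, while the
    // (start, end) overload below redispatches to the two-argument _ops::range.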
    at::Tensor & range_out_out_(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::range_out_::call(start, end, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::range_out_::call(start, end, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::range::call(start, end, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & reciprocal_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::reciprocal_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::reciprocal_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::reciprocal::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & reciprocal_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
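      // For genuine in-place ops such as reciprocal_, the meta reference run below is
      // enabled (the leading literal is `true`), so shape and dtype errors still
      // surface even though the call is rewritten to the functional reciprocal().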
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::reciprocal_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::reciprocal_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::reciprocal::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & neg_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::neg_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
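         // AutoDispatchSkipFunctionalize excludes the Functionalize key for this scope,
         // so the redispatch below cannot re-enter this kernel.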
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::neg_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::neg::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & neg_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::neg_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::neg_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::neg::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & negative_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::negative_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::negative_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::negative::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & negative_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::negative_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::negative_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::negative::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & repeat_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef repeats, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::repeat_out::call(self_meta, repeats, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::repeat_out::call(self_, repeats, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::repeat::call(self_, repeats);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & repeat_interleave_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & repeats, ::std::optional<c10::SymInt> output_size, at::Tensor & out) {
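      // For this op the tensor input is `repeats` (there is no `self`), so the
      // functional/XLA checks below key off `repeats` instead.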
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto repeats_meta = to_meta(repeats);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::repeat_interleave_Tensor_out::call(repeats_meta, output_size, out_meta);
      }
      
      at::Tensor repeats_;
      if (at::functionalization::impl::isFunctionalTensor(repeats)) {
        at::functionalization::impl::sync(repeats);
        repeats_ = at::functionalization::impl::from_functional_tensor(repeats);
      } else {
        repeats_ = repeats;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || repeats.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(repeats))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::repeat_interleave_Tensor_out::call(repeats_, output_size, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::repeat_interleave_Tensor::call(repeats_, output_size);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _mkldnn_reshape_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef shape, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_mkldnn_reshape_out::call(self_meta, shape, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_mkldnn_reshape_out::call(self_, shape, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_mkldnn_reshape::call(self_, shape);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & round_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::round_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::round_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::round::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & round_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
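        // to_meta produces a meta-device clone that carries only metadata
        // (sizes/strides/dtype), so this reference run does no real compute.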
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::round_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::round_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::round::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & round_out_decimals_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t decimals, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::round_decimals_out::call(self_meta, decimals, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::round_decimals_out::call(self_, decimals, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::round_decimals::call(self_, decimals);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & round__decimals(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t decimals) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::round__decimals::call(self_meta, decimals);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::round__decimals::call(self_, decimals);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::round_decimals::call(self_, decimals);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & relu_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::relu_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::relu_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::relu::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & relu_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::relu_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::relu_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::relu::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & gelu_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view approximate, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::gelu_out::call(self_meta, approximate, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::gelu_out::call(self_, approximate, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::gelu::call(self_, approximate);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & gelu_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, c10::string_view approximate) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::gelu_::call(self_meta, approximate);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::gelu_::call(self_, approximate);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::gelu::call(self_, approximate);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & gelu_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::gelu_backward_grad_input::call(grad_output_meta, self_meta, approximate, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::gelu_backward_grad_input::call(grad_output_, self_, approximate, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::gelu_backward::call(grad_output_, self_, approximate);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }

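    // When the mutated argument is not a functional tensor, the kernels above take one of two exits:
    // mutating it from a functional input is an internal error (unless an XLA tensor is involved,
    // since e.g. cpu_tensor.copy_(xla_tensor) is legal), and otherwise the original mutable op is
    // simply redispatched with functionalization skipped.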
    at::Tensor & hardshrink_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::hardshrink_out::call(self_meta, lambd, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::hardshrink_out::call(self_, lambd, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::hardshrink::call(self_, lambd);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & hardshrink_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_out_meta = to_meta(grad_out);
        auto self_meta = to_meta(self);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::hardshrink_backward_grad_input::call(grad_out_meta, self_meta, lambd, grad_input_meta);
      }
      
      at::Tensor grad_out_;
      if (at::functionalization::impl::isFunctionalTensor(grad_out)) {
        at::functionalization::impl::sync(grad_out);
        grad_out_ = at::functionalization::impl::from_functional_tensor(grad_out);
      } else {
        grad_out_ = grad_out;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_out.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_out) || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::hardshrink_backward_grad_input::call(grad_out_, self_, lambd, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::hardshrink_backward::call(grad_out_, self_, lambd);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }

    at::Tensor & rsqrt_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::rsqrt_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::rsqrt_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::rsqrt::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & rsqrt_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::rsqrt_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::rsqrt_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::rsqrt::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & select_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::select_backward_out::call(grad_output_meta, input_sizes, dim, index, out_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::select_backward_out::call(grad_output_, input_sizes, dim, index, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::select_backward::call(grad_output_, input_sizes, dim, index);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

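    // select_backward_out above also shows how non-Tensor arguments (input_sizes, dim, index,
    // including SymInt values) are forwarded unchanged: only Tensor arguments get unwrapped, and the
    // out= overload is lowered onto the purely functional at::_ops::select_backward::call.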
    at::Tensor & celu_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & alpha, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::celu_out::call(self_meta, alpha, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::celu_out::call(self_, alpha, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::celu::call(self_, alpha);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & celu_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & alpha) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::celu_::call(self_meta, alpha);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::celu_::call(self_, alpha);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::celu::call(self_, alpha);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & silu_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::silu_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::silu_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::silu::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & silu_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::silu_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::silu_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::silu::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & silu_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::silu_backward_grad_input::call(grad_output_meta, self_meta, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::silu_backward_grad_input::call(grad_output_, self_, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::silu_backward::call(grad_output_, self_);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }

    at::Tensor & mish_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::mish_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::mish_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::mish::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & mish_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::mish_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::mish_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::mish::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & sigmoid_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::sigmoid_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::sigmoid_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::sigmoid::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & sigmoid_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::sigmoid_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::sigmoid_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::sigmoid::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & logit_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<double> eps, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::logit_out::call(self_meta, eps, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::logit_out::call(self_, eps, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::logit::call(self_, eps);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & logit_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, ::std::optional<double> eps) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::logit_::call(self_meta, eps);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::logit_::call(self_, eps);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::logit::call(self_, eps);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & sin_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::sin_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::sin_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::sin::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
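
    // Note [out= functionalization pattern]
    // out= kernels such as sin_out_out above sync() and unwrap every tensor argument, then either
    // (a) no-op and redispatch to the original out= op when `out` is not a functional tensor, after
    // checking that a functional tensor is not being written into a non-functional one, or
    // (b) call the purely functional op (at::_ops::sin::call here) and commit the result into `out`
    // with replace_(), commit_update(), and sync(), again propagating XLA data to the updated inner
    // tensor. The out= variants below all follow this same generated structure.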

    at::Tensor & sin_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::sin_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::sin_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::sin::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & sinc_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::sinc_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::sinc_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::sinc::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & sinc_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::sinc_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::sinc_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::sinc::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & sinh_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::sinh_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::sinh_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::sinh::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & sinh_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::sinh_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::sinh_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::sinh::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & slice_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::slice_backward_out::call(grad_output_meta, input_sizes, dim, start, end, step, out_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::slice_backward_out::call(grad_output_, input_sizes, dim, start, end, step, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::slice_backward::call(grad_output_, input_sizes, dim, start, end, step);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & slice_scatter_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t dim, ::std::optional<c10::SymInt> start, ::std::optional<c10::SymInt> end, c10::SymInt step, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto src_meta = to_meta(src);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::slice_scatter_out::call(self_meta, src_meta, dim, start, end, step, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor src_;
      if (at::functionalization::impl::isFunctionalTensor(src)) {
        at::functionalization::impl::sync(src);
        src_ = at::functionalization::impl::from_functional_tensor(src);
      } else {
        src_ = src;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || src.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(src))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::slice_scatter_out::call(self_, src_, dim, start, end, step, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::slice_scatter::call(self_, src_, dim, start, end, step);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & select_scatter_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::SymInt index, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto src_meta = to_meta(src);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::select_scatter_out::call(self_meta, src_meta, dim, index, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor src_;
      if (at::functionalization::impl::isFunctionalTensor(src)) {
        at::functionalization::impl::sync(src);
        src_ = at::functionalization::impl::from_functional_tensor(src);
      } else {
        src_ = src;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || src.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(src))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::select_scatter_out::call(self_, src_, dim, index, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::select_scatter::call(self_, src_, dim, index);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & diagonal_scatter_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto src_meta = to_meta(src);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::diagonal_scatter_out::call(self_meta, src_meta, offset, dim1, dim2, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor src_;
      if (at::functionalization::impl::isFunctionalTensor(src)) {
        at::functionalization::impl::sync(src);
        src_ = at::functionalization::impl::from_functional_tensor(src);
      } else {
        src_ = src;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || src.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(src))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::diagonal_scatter_out::call(self_, src_, offset, dim1, dim2, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::diagonal_scatter::call(self_, src_, offset, dim1, dim2);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & as_strided_scatter_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<c10::SymInt> storage_offset, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto src_meta = to_meta(src);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::as_strided_scatter_out::call(self_meta, src_meta, size, stride, storage_offset, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor src_;
      if (at::functionalization::impl::isFunctionalTensor(src)) {
        at::functionalization::impl::sync(src);
        src_ = at::functionalization::impl::from_functional_tensor(src);
      } else {
        src_ = src;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || src.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(src))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::as_strided_scatter_out::call(self_, src_, size, stride, storage_offset, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::as_strided_scatter::call(self_, src_, size, stride, storage_offset);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & softmax_out_int_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::softmax_int_out::call(self_meta, dim, dtype, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::softmax_int_out::call(self_, dim, dtype, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::softmax_int::call(self_, dim, dtype);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _softmax_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_softmax_out::call(self_meta, dim, half_to_float, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_softmax_out::call(self_, dim, half_to_float, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_softmax::call(self_, dim, half_to_float);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _softmax_backward_data_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto output_meta = to_meta(output);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_softmax_backward_data_out::call(grad_output_meta, output_meta, dim, input_dtype, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor output_;
      if (at::functionalization::impl::isFunctionalTensor(output)) {
        at::functionalization::impl::sync(output);
        output_ = at::functionalization::impl::from_functional_tensor(output);
      } else {
        output_ = output;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || output.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(output))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_softmax_backward_data_out::call(grad_output_, output_, dim, input_dtype, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_softmax_backward_data::call(grad_output_, output_, dim, input_dtype);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }

    void unsafe_split_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::unsafe_split_Tensor_out::call(self_meta, split_size, dim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::unsafe_split_Tensor_out::call(self_, split_size, dim, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::unsafe_split_Tensor::call(self_, split_size, dim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }
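
    // Note [TensorList outputs]
    // Ops whose out argument is an at::TensorList, like unsafe_split.Tensor_out above, follow the
    // same scheme but return void: the list is unwrapped into a ::std::vector<at::Tensor>, the
    // functional variant produces a vector of results, and the TensorList overloads of replace_(),
    // commit_update(), and sync() commit them back into the whole list at once.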

    void unsafe_split_with_sizes_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::unsafe_split_with_sizes_out::call(self_meta, split_sizes, dim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::unsafe_split_with_sizes_out::call(self_, split_sizes, dim, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::unsafe_split_with_sizes::call(self_, split_sizes, dim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    at::Tensor & sspaddmm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto mat1_meta = to_meta(mat1);
        auto mat2_meta = to_meta(mat2);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::sspaddmm_out::call(self_meta, mat1_meta, mat2_meta, beta, alpha, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor mat1_;
      if (at::functionalization::impl::isFunctionalTensor(mat1)) {
        at::functionalization::impl::sync(mat1);
        mat1_ = at::functionalization::impl::from_functional_tensor(mat1);
      } else {
        mat1_ = mat1;
      }
      
      at::Tensor mat2_;
      if (at::functionalization::impl::isFunctionalTensor(mat2)) {
        at::functionalization::impl::sync(mat2);
        mat2_ = at::functionalization::impl::from_functional_tensor(mat2);
      } else {
        mat2_ = mat2;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || mat1.device().type() == c10::DeviceType::XLA || mat2.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(mat1) || at::functionalization::impl::isFunctionalTensor(mat2))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::sspaddmm_out::call(self_, mat1_, mat2_, beta, alpha, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::sspaddmm::call(self_, mat1_, mat2_, beta, alpha);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _chunk_cat_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim, int64_t num_chunks, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto tensors_meta = to_meta(tensors);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_chunk_cat_out::call(tensors_meta, dim, num_chunks, out_meta);
      }
      
      ::std::vector<at::Tensor> tensors_;
      if (at::functionalization::impl::isFunctionalTensor(tensors)) {
        at::functionalization::impl::sync(tensors);
        tensors_ = at::functionalization::impl::from_functional_tensor(tensors);
      } else {
        tensors_ = tensors.vec();
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(tensors))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_chunk_cat_out::call(tensors_, dim, num_chunks, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_chunk_cat::call(tensors_, dim, num_chunks);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
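
    // Note [TensorList inputs]
    // When an input (rather than an output) is an at::TensorList, as with `tensors` in
    // _chunk_cat_out_out above, unwrapping materializes a ::std::vector<at::Tensor>, falling back
    // to tensors.vec() when the list is not functional; the output handling is otherwise identical
    // to the single-tensor out= pattern.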

    at::Tensor & stack_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto tensors_meta = to_meta(tensors);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::stack_out::call(tensors_meta, dim, out_meta);
      }
      
      ::std::vector<at::Tensor> tensors_;
      if (at::functionalization::impl::isFunctionalTensor(tensors)) {
        at::functionalization::impl::sync(tensors);
        tensors_ = at::functionalization::impl::from_functional_tensor(tensors);
      } else {
        tensors_ = tensors.vec();
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(tensors))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::stack_out::call(tensors_, dim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::stack::call(tensors_, dim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
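
    // Conceptually, every out= kernel in this file performs the same rewrite when `out`
    // is a FunctionalTensorWrapper: call the functional variant of the op and write the
    // result back into the wrapper. A minimal sketch of that path for stack_out_out
    // (meta preflight and XLA handling omitted):
    //
    //   at::Tensor tmp = at::_ops::stack::call(tensors_, dim);   // functional variant
    //   at::functionalization::impl::replace_(out, tmp);         // stage the new value
    //   at::functionalization::impl::commit_update(out);         // record the mutation
    //   at::functionalization::impl::sync(out);                  // re-sync the wrapper
    //
    // propagate_xla_data_direct then carries XLA-specific data from the old inner tensor
    // to the updated one. If `out` is not functional, the kernel instead no-ops and
    // redispatches to the underlying out= op, returning `out` unchanged.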

    at::Tensor & _stack_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto tensors_meta = to_meta(tensors);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_stack_out::call(tensors_meta, dim, out_meta);
      }
      
      ::std::vector<at::Tensor> tensors_;
      if (at::functionalization::impl::isFunctionalTensor(tensors)) {
        at::functionalization::impl::sync(tensors);
        tensors_ = at::functionalization::impl::from_functional_tensor(tensors);
      } else {
        tensors_ = tensors.vec();
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(tensors))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_stack_out::call(tensors_, dim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_stack::call(tensors_, dim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
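
    // In both branches, AutoDispatchSkipFunctionalize excludes the Functionalize dispatch
    // key for the nested call, so the redispatch reaches the backend kernel instead of
    // re-entering these wrappers.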

    at::Tensor & hstack_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto tensors_meta = to_meta(tensors);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::hstack_out::call(tensors_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> tensors_;
      if (at::functionalization::impl::isFunctionalTensor(tensors)) {
        at::functionalization::impl::sync(tensors);
        tensors_ = at::functionalization::impl::from_functional_tensor(tensors);
      } else {
        tensors_ = tensors.vec();
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(tensors))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::hstack_out::call(tensors_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::hstack::call(tensors_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & vstack_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto tensors_meta = to_meta(tensors);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::vstack_out::call(tensors_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> tensors_;
      if (at::functionalization::impl::isFunctionalTensor(tensors)) {
        at::functionalization::impl::sync(tensors);
        tensors_ = at::functionalization::impl::from_functional_tensor(tensors);
      } else {
        tensors_ = tensors.vec();
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(tensors))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::vstack_out::call(tensors_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::vstack::call(tensors_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & dstack_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto tensors_meta = to_meta(tensors);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::dstack_out::call(tensors_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> tensors_;
      if (at::functionalization::impl::isFunctionalTensor(tensors)) {
        at::functionalization::impl::sync(tensors);
        tensors_ = at::functionalization::impl::from_functional_tensor(tensors);
      } else {
        tensors_ = tensors.vec();
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(tensors))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::dstack_out::call(tensors_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::dstack::call(tensors_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & sum_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::sum_out::call(self_meta, dtype, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::sum_out::call(self_, dtype, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::sum::call(self_, dtype);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
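
    // For kernels that take a single Tensor input (`self` here), the case-1 error is also
    // skipped when `self` sits on an XLA device
    // (`self.device().type() == c10::DeviceType::XLA`), matching the comment above:
    // copying between XLA and non-functional CPU tensors is legitimate. The TensorList
    // kernels above fold that device predicate to a constant `false`.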

    at::Tensor & sum_out_IntList_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::sum_IntList_out::call(self_meta, dim, keepdim, dtype, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::sum_IntList_out::call(self_, dim, keepdim, dtype, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::sum_dim_IntList::call(self_, dim, keepdim, dtype);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
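
    // Note the overload mapping: the mutable schema `sum.IntList_out`
    // (at::_ops::sum_IntList_out) is re-expressed through the functional overload
    // `sum.dim_IntList` (at::_ops::sum_dim_IntList); the functional counterpart's overload
    // name does not always mirror the out= overload name (std_out -> std_dim and
    // prod_int_out -> prod_dim_int below follow the same pattern).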

    at::Tensor & sum_out_DimnameList_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::sum_DimnameList_out::call(self_meta, dim, keepdim, dtype, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::sum_DimnameList_out::call(self_, dim, keepdim, dtype, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::sum_dim_DimnameList::call(self_, dim, keepdim, dtype);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & nansum_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::nansum_out::call(self_meta, dim, keepdim, dtype, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::nansum_out::call(self_, dim, keepdim, dtype, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::nansum::call(self_, dim, keepdim, dtype);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & sqrt_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::sqrt_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::sqrt_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::sqrt::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & sqrt_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::sqrt_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::sqrt_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::sqrt::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
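
    // In-place variants such as sqrt_ differ from the out= kernels in two ways: the
    // meta-tensor preflight above is enabled (its guard is `true && ...` rather than the
    // constant-folded `false && ...`), and the functional result is written back into
    // `self` rather than into a separate `out`. A minimal sketch of the functional path,
    // assuming `self` is a FunctionalTensorWrapper:
    //
    //   at::Tensor tmp = at::_ops::sqrt::call(self_);      // out-of-place sqrt
    //   at::functionalization::impl::replace_(self, tmp);  // self now holds the result
    //   at::functionalization::impl::commit_update(self);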

    at::Tensor & square_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::square_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::square_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::square::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & square_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::square_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::square_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::square::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & std_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::std_out::call(self_meta, dim, unbiased, keepdim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::std_out::call(self_, dim, unbiased, keepdim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::std_dim::call(self_, dim, unbiased, keepdim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & std_out_correction_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::std_correction_out::call(self_meta, dim, correction, keepdim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::std_correction_out::call(self_, dim, correction, keepdim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::std_correction::call(self_, dim, correction, keepdim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> std_mean_out_correction_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out0, at::Tensor & out1) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::std_mean_correction_out::call(self_meta, dim, correction, keepdim, out0_meta, out1_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::std_mean_correction_out::call(self_, dim, correction, keepdim, out0_, out1_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::std_mean_correction::call(self_, dim, correction, keepdim);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
      }
    }
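
    // Kernels with multiple out= arguments (out0/out1 here) apply the same write-back once
    // per output: the functional variant returns a tuple, and each element is committed
    // into its corresponding wrapper, e.g.
    //
    //   auto tmp = at::_ops::std_mean_correction::call(self_, dim, correction, keepdim);
    //   at::functionalization::impl::replace_(out0, std::get<0>(tmp));
    //   at::functionalization::impl::replace_(out1, std::get<1>(tmp));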

    at::Tensor & std_out_names_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::std_names_out::call(self_meta, dim, unbiased, keepdim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::std_names_out::call(self_, dim, unbiased, keepdim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::std_names_dim::call(self_, dim, unbiased, keepdim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & std_out_correction_names_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, const ::std::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::std_correction_names_out::call(self_meta, dim, correction, keepdim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::std_correction_names_out::call(self_, dim, correction, keepdim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::std_correction_names::call(self_, dim, correction, keepdim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & prod_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::prod_out::call(self_meta, dtype, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::prod_out::call(self_, dtype, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::prod::call(self_, dtype);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & prod_out_int_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::prod_int_out::call(self_meta, dim, keepdim, dtype, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::prod_int_out::call(self_, dim, keepdim, dtype, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::prod_dim_int::call(self_, dim, keepdim, dtype);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & prod_out_Dimname_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::prod_Dimname_out::call(self_meta, dim, keepdim, dtype, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::prod_Dimname_out::call(self_, dim, keepdim, dtype, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::prod_dim_Dimname::call(self_, dim, keepdim, dtype);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & tan_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::tan_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::tan_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::tan::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & tan_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::tan_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::tan_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::tan::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
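
    // Illustrative sketch (hypothetical caller-side code, not part of this file): an
    // in-place call such as
    //
    //   x.tan_();
    //
    // executed under functionalization reaches tan_ above, which recomputes the value
    // with the functional at::tan(x) and commits it back into x's
    // FunctionalTensorWrapper, so traced programs see a pure at::tan instead of a
    // mutation.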

    at::Tensor & tanh_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::tanh_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::tanh_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::tanh::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & tanh_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::tanh_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::tanh_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::tanh::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & tensordot_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::IntArrayRef dims_self, at::IntArrayRef dims_other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::tensordot_out::call(self_meta, other_meta, dims_self, dims_other, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::tensordot_out::call(self_, other_, dims_self, dims_other, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::tensordot::call(self_, other_, dims_self, dims_other);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & threshold_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::threshold_out::call(self_meta, threshold, value, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::threshold_out::call(self_, threshold, value, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::threshold::call(self_, threshold, value);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & threshold_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::threshold_::call(self_meta, threshold, value);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::threshold_::call(self_, threshold, value);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::threshold::call(self_, threshold, value);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & threshold_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::threshold_backward_grad_input::call(grad_output_meta, self_meta, threshold, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::threshold_backward_grad_input::call(grad_output_, self_, threshold, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::threshold_backward::call(grad_output_, self_, threshold);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }

    at::Tensor & _mkldnn_transpose_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim0, int64_t dim1, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_mkldnn_transpose_out::call(self_meta, dim0, dim1, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_mkldnn_transpose_out::call(self_, dim0, dim1, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_mkldnn_transpose::call(self_, dim0, dim1);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _mkldnn_transpose_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim0, int64_t dim1) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_mkldnn_transpose_::call(self_meta, dim0, dim1);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_mkldnn_transpose_::call(self_, dim0, dim1);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_mkldnn_transpose::call(self_, dim0, dim1);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & flip_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dims, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::flip_out::call(self_meta, dims, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::flip_out::call(self_, dims, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::flip::call(self_, dims);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & roll_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef shifts, at::IntArrayRef dims, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::roll_out::call(self_meta, shifts, dims, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::roll_out::call(self_, shifts, dims, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::roll::call(self_, shifts, dims);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & rot90_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t k, at::IntArrayRef dims, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::rot90_out::call(self_meta, k, dims, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::rot90_out::call(self_, k, dims, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::rot90::call(self_, k, dims);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _transform_bias_rescale_qkv_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & qkv, const at::Tensor & qkv_bias, int64_t num_heads, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto qkv_meta = to_meta(qkv);
        auto qkv_bias_meta = to_meta(qkv_bias);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        auto out2_meta = to_meta(out2);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_transform_bias_rescale_qkv_out::call(qkv_meta, qkv_bias_meta, num_heads, out0_meta, out1_meta, out2_meta);
      }
      
      at::Tensor qkv_;
      if (at::functionalization::impl::isFunctionalTensor(qkv)) {
        at::functionalization::impl::sync(qkv);
        qkv_ = at::functionalization::impl::from_functional_tensor(qkv);
      } else {
        qkv_ = qkv;
      }
      
      at::Tensor qkv_bias_;
      if (at::functionalization::impl::isFunctionalTensor(qkv_bias)) {
        at::functionalization::impl::sync(qkv_bias);
        qkv_bias_ = at::functionalization::impl::from_functional_tensor(qkv_bias);
      } else {
        qkv_bias_ = qkv_bias;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      
      at::Tensor out2_;
      if (at::functionalization::impl::isFunctionalTensor(out2)) {
        at::functionalization::impl::sync(out2);
        out2_ = at::functionalization::impl::from_functional_tensor(out2);
      } else {
        out2_ = out2;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || qkv.device().type() == c10::DeviceType::XLA || qkv_bias.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(qkv) || at::functionalization::impl::isFunctionalTensor(qkv_bias))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::_transform_bias_rescale_qkv_out::call(qkv_, qkv_bias_, num_heads, out0_, out1_, out2_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_transform_bias_rescale_qkv::call(qkv_, qkv_bias_, num_heads);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        auto out2_inner = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(out2);
        at::functionalization::impl::sync(out2);
        auto out2_inner_updated = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::propagate_xla_data_direct(out2_inner, out2_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
      }
    }
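
    // For multi-output ops like _transform_bias_rescale_qkv_out above, the same commit
    // pattern is applied element-wise: each functional output wrapper (out0, out1, out2)
    // is updated from the corresponding std::get<i>(tmp_output) of the functional call
    // before the tuple of references is returned.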

    at::Tensor & _nested_tensor_from_mask_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & t, const at::Tensor & mask, bool mask_check, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto t_meta = to_meta(t);
        auto mask_meta = to_meta(mask);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_nested_tensor_from_mask_out::call(t_meta, mask_meta, mask_check, out_meta);
      }
      
      at::Tensor t_;
      if (at::functionalization::impl::isFunctionalTensor(t)) {
        at::functionalization::impl::sync(t);
        t_ = at::functionalization::impl::from_functional_tensor(t);
      } else {
        t_ = t;
      }
      
      at::Tensor mask_;
      if (at::functionalization::impl::isFunctionalTensor(mask)) {
        at::functionalization::impl::sync(mask);
        mask_ = at::functionalization::impl::from_functional_tensor(mask);
      } else {
        mask_ = mask;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || t.device().type() == c10::DeviceType::XLA || mask.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(t) || at::functionalization::impl::isFunctionalTensor(mask))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_nested_tensor_from_mask_out::call(t_, mask_, mask_check, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_nested_tensor_from_mask::call(t_, mask_, mask_check);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _nested_from_padded_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & padded, const at::Tensor & cpu_nested_shape_example, bool fuse_transform_0213, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto padded_meta = to_meta(padded);
        auto cpu_nested_shape_example_meta = to_meta(cpu_nested_shape_example);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_nested_from_padded_out::call(padded_meta, cpu_nested_shape_example_meta, fuse_transform_0213, out_meta);
      }
      
      at::Tensor padded_;
      if (at::functionalization::impl::isFunctionalTensor(padded)) {
        at::functionalization::impl::sync(padded);
        padded_ = at::functionalization::impl::from_functional_tensor(padded);
      } else {
        padded_ = padded;
      }
      
      at::Tensor cpu_nested_shape_example_;
      if (at::functionalization::impl::isFunctionalTensor(cpu_nested_shape_example)) {
        at::functionalization::impl::sync(cpu_nested_shape_example);
        cpu_nested_shape_example_ = at::functionalization::impl::from_functional_tensor(cpu_nested_shape_example);
      } else {
        cpu_nested_shape_example_ = cpu_nested_shape_example;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || padded.device().type() == c10::DeviceType::XLA || cpu_nested_shape_example.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(padded) || at::functionalization::impl::isFunctionalTensor(cpu_nested_shape_example))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_nested_from_padded_out::call(padded_, cpu_nested_shape_example_, fuse_transform_0213, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_nested_from_padded::call(padded_, cpu_nested_shape_example_, fuse_transform_0213);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _nested_tensor_size_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_nested_tensor_size_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_nested_tensor_size_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_nested_tensor_size::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _nested_tensor_strides_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_nested_tensor_strides_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_nested_tensor_strides_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_nested_tensor_strides::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _nested_tensor_storage_offsets_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_nested_tensor_storage_offsets_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_nested_tensor_storage_offsets_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_nested_tensor_storage_offsets::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _nested_from_padded_and_nested_example_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & padded, const at::Tensor & nt_example, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto padded_meta = to_meta(padded);
        auto nt_example_meta = to_meta(nt_example);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_nested_from_padded_and_nested_example_out::call(padded_meta, nt_example_meta, out_meta);
      }
      
      at::Tensor padded_;
      if (at::functionalization::impl::isFunctionalTensor(padded)) {
        at::functionalization::impl::sync(padded);
        padded_ = at::functionalization::impl::from_functional_tensor(padded);
      } else {
        padded_ = padded;
      }
      
      at::Tensor nt_example_;
      if (at::functionalization::impl::isFunctionalTensor(nt_example)) {
        at::functionalization::impl::sync(nt_example);
        nt_example_ = at::functionalization::impl::from_functional_tensor(nt_example);
      } else {
        nt_example_ = nt_example;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || padded.device().type() == c10::DeviceType::XLA || nt_example.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(padded) || at::functionalization::impl::isFunctionalTensor(nt_example))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_nested_from_padded_and_nested_example_out::call(padded_, nt_example_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_nested_from_padded_and_nested_example::call(padded_, nt_example_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _nested_view_from_buffer_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto nested_size_meta = to_meta(nested_size);
        auto nested_strides_meta = to_meta(nested_strides);
        auto offsets_meta = to_meta(offsets);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_nested_view_from_buffer_copy_out::call(self_meta, nested_size_meta, nested_strides_meta, offsets_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor nested_size_;
      if (at::functionalization::impl::isFunctionalTensor(nested_size)) {
        at::functionalization::impl::sync(nested_size);
        nested_size_ = at::functionalization::impl::from_functional_tensor(nested_size);
      } else {
        nested_size_ = nested_size;
      }
      
      at::Tensor nested_strides_;
      if (at::functionalization::impl::isFunctionalTensor(nested_strides)) {
        at::functionalization::impl::sync(nested_strides);
        nested_strides_ = at::functionalization::impl::from_functional_tensor(nested_strides);
      } else {
        nested_strides_ = nested_strides;
      }
      
      at::Tensor offsets_;
      if (at::functionalization::impl::isFunctionalTensor(offsets)) {
        at::functionalization::impl::sync(offsets);
        offsets_ = at::functionalization::impl::from_functional_tensor(offsets);
      } else {
        offsets_ = offsets;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || nested_size.device().type() == c10::DeviceType::XLA || nested_strides.device().type() == c10::DeviceType::XLA || offsets.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(nested_size) || at::functionalization::impl::isFunctionalTensor(nested_strides) || at::functionalization::impl::isFunctionalTensor(offsets))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_nested_view_from_buffer_copy_out::call(self_, nested_size_, nested_strides_, offsets_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_nested_view_from_buffer_copy::call(self_, nested_size_, nested_strides_, offsets_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _nested_view_from_jagged_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & offsets, const at::Tensor & dummy, const ::std::optional<at::Tensor> & lengths, int64_t ragged_idx, const ::std::optional<at::Tensor> & min_seqlen, const ::std::optional<at::Tensor> & max_seqlen, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto offsets_meta = to_meta(offsets);
        auto dummy_meta = to_meta(dummy);
        auto lengths_meta = to_meta(lengths);
        auto min_seqlen_meta = to_meta(min_seqlen);
        auto max_seqlen_meta = to_meta(max_seqlen);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_nested_view_from_jagged_copy_out::call(self_meta, offsets_meta, dummy_meta, lengths_meta, ragged_idx, min_seqlen_meta, max_seqlen_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor offsets_;
      if (at::functionalization::impl::isFunctionalTensor(offsets)) {
        at::functionalization::impl::sync(offsets);
        offsets_ = at::functionalization::impl::from_functional_tensor(offsets);
      } else {
        offsets_ = offsets;
      }
      
      at::Tensor dummy_;
      if (at::functionalization::impl::isFunctionalTensor(dummy)) {
        at::functionalization::impl::sync(dummy);
        dummy_ = at::functionalization::impl::from_functional_tensor(dummy);
      } else {
        dummy_ = dummy;
      }
      
      ::std::optional<at::Tensor> lengths_;
      if (at::functionalization::impl::isFunctionalTensor(lengths)) {
        at::functionalization::impl::sync(lengths);
        lengths_ = at::functionalization::impl::from_functional_tensor(lengths);
      } else {
        lengths_ = lengths;
      }
      
      ::std::optional<at::Tensor> min_seqlen_;
      if (at::functionalization::impl::isFunctionalTensor(min_seqlen)) {
        at::functionalization::impl::sync(min_seqlen);
        min_seqlen_ = at::functionalization::impl::from_functional_tensor(min_seqlen);
      } else {
        min_seqlen_ = min_seqlen;
      }
      
      ::std::optional<at::Tensor> max_seqlen_;
      if (at::functionalization::impl::isFunctionalTensor(max_seqlen)) {
        at::functionalization::impl::sync(max_seqlen);
        max_seqlen_ = at::functionalization::impl::from_functional_tensor(max_seqlen);
      } else {
        max_seqlen_ = max_seqlen;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || offsets.device().type() == c10::DeviceType::XLA || dummy.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(offsets) || at::functionalization::impl::isFunctionalTensor(dummy) || at::functionalization::impl::isFunctionalTensor(lengths) || at::functionalization::impl::isFunctionalTensor(min_seqlen) || at::functionalization::impl::isFunctionalTensor(max_seqlen))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_nested_view_from_jagged_copy_out::call(self_, offsets_, dummy_, lengths_, ragged_idx, min_seqlen_, max_seqlen_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_nested_view_from_jagged_copy::call(self_, offsets_, dummy_, lengths_, ragged_idx, min_seqlen_, max_seqlen_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
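
    // Kernels with optional tensor arguments (lengths, min_seqlen, max_seqlen
    // above) unwrap them through the std::optional-taking forms of sync() and
    // from_functional_tensor(). Note that the XLA escape hatch in the error
    // check only inspects the devices of the required tensor arguments, while
    // the "is any input functional" test covers the optionals as well.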

    at::Tensor & _nested_get_values_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_nested_get_values_copy_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_nested_get_values_copy_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_nested_get_values_copy::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _trilinear_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & i1, const at::Tensor & i2, const at::Tensor & i3, at::IntArrayRef expand1, at::IntArrayRef expand2, at::IntArrayRef expand3, at::IntArrayRef sumdim, int64_t unroll_dim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto i1_meta = to_meta(i1);
        auto i2_meta = to_meta(i2);
        auto i3_meta = to_meta(i3);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_trilinear_out::call(i1_meta, i2_meta, i3_meta, expand1, expand2, expand3, sumdim, unroll_dim, out_meta);
      }
      
      at::Tensor i1_;
      if (at::functionalization::impl::isFunctionalTensor(i1)) {
        at::functionalization::impl::sync(i1);
        i1_ = at::functionalization::impl::from_functional_tensor(i1);
      } else {
        i1_ = i1;
      }
      
      at::Tensor i2_;
      if (at::functionalization::impl::isFunctionalTensor(i2)) {
        at::functionalization::impl::sync(i2);
        i2_ = at::functionalization::impl::from_functional_tensor(i2);
      } else {
        i2_ = i2;
      }
      
      at::Tensor i3_;
      if (at::functionalization::impl::isFunctionalTensor(i3)) {
        at::functionalization::impl::sync(i3);
        i3_ = at::functionalization::impl::from_functional_tensor(i3);
      } else {
        i3_ = i3;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || i1.device().type() == c10::DeviceType::XLA || i2.device().type() == c10::DeviceType::XLA || i3.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(i1) || at::functionalization::impl::isFunctionalTensor(i2) || at::functionalization::impl::isFunctionalTensor(i3))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_trilinear_out::call(i1_, i2_, i3_, expand1, expand2, expand3, sumdim, unroll_dim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_trilinear::call(i1_, i2_, i3_, expand1, expand2, expand3, sumdim, unroll_dim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & trunc_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::trunc_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::trunc_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::trunc::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
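
    // A rough sketch of how a kernel like trunc_out_out above gets exercised
    // (assumes the wrapper helpers declared in ATen/FunctionalTensorWrapper.h;
    // illustrative only, not part of the generated registrations):
    //
    //   at::Tensor self = at::randn({4});
    //   at::Tensor out = at::empty({4});
    //   // Wrap both tensors so they carry the Functionalize dispatch key.
    //   auto self_f = at::functionalization::impl::to_functional_tensor(self);
    //   auto out_f = at::functionalization::impl::to_functional_tensor(out);
    //   // Dispatches to trunc_out_out, which runs the functional at::trunc on the
    //   // unwrapped input and commits the result into out_f's wrapper.
    //   at::trunc_out(out_f, self_f);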

    at::Tensor & trunc_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::trunc_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::trunc_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::trunc::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
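
    // Unlike the out= overloads above, the in-place kernels (trunc_, fix_, ...)
    // are generated with the meta-reference block enabled ("true &&" rather than
    // "false &&"): in-place ops all support meta tensors, so the original op can
    // first be run on meta copies to surface shape errors that the functional
    // rewrite would otherwise hide. The mutated argument is self, so the
    // replace_/commit_update/sync epilogue writes the functional result back
    // into self instead of an out tensor.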

    at::Tensor & fix_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::fix_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::fix_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::fix::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & fix_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::fix_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::fix_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::fix::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
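
    // fix is an alias of trunc in the public API, which is why the fix_out_out
    // and fix_ kernels above are structurally identical to their trunc
    // counterparts; only the redispatched operator handles differ.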

    ::std::tuple<at::Tensor &,at::Tensor &> _unique_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool sorted, bool return_inverse, at::Tensor & out0, at::Tensor & out1) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_unique_out::call(self_meta, sorted, return_inverse, out0_meta, out1_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::_unique_out::call(self_, sorted, return_inverse, out0_, out1_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_unique::call(self_, sorted, return_inverse);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
      }
    }
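
    // Multi-output variants such as _unique_out follow the same pattern, just
    // replicated per output: each out tensor is unwrapped and checked for
    // functional-ness individually, the functional op (_unique) returns a tuple,
    // and every element gets its own replace_/commit_update/sync and
    // propagate_xla_data_direct step before the tuple of references is returned.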

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> unique_dim_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        auto out2_meta = to_meta(out2);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::unique_dim_out::call(self_meta, dim, sorted, return_inverse, return_counts, out0_meta, out1_meta, out2_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      
      at::Tensor out2_;
      if (at::functionalization::impl::isFunctionalTensor(out2)) {
        at::functionalization::impl::sync(out2);
        out2_ = at::functionalization::impl::from_functional_tensor(out2);
      } else {
        out2_ = out2;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::unique_dim_out::call(self_, dim, sorted, return_inverse, return_counts, out0_, out1_, out2_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::unique_dim::call(self_, dim, sorted, return_inverse, return_counts);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        auto out2_inner = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(out2);
        at::functionalization::impl::sync(out2);
        auto out2_inner_updated = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::propagate_xla_data_direct(out2_inner, out2_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> unique_consecutive_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool return_inverse, bool return_counts, ::std::optional<int64_t> dim, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        auto out2_meta = to_meta(out2);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::unique_consecutive_out::call(self_meta, return_inverse, return_counts, dim, out0_meta, out1_meta, out2_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      
      at::Tensor out2_;
      if (at::functionalization::impl::isFunctionalTensor(out2)) {
        at::functionalization::impl::sync(out2);
        out2_ = at::functionalization::impl::from_functional_tensor(out2);
      } else {
        out2_ = out2;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::unique_consecutive_out::call(self_, return_inverse, return_counts, dim, out0_, out1_, out2_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::unique_consecutive::call(self_, return_inverse, return_counts, dim);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        auto out2_inner = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(out2);
        at::functionalization::impl::sync(out2);
        auto out2_inner_updated = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::propagate_xla_data_direct(out2_inner, out2_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> unique_dim_consecutive_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        auto out2_meta = to_meta(out2);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::unique_dim_consecutive_out::call(self_meta, dim, return_inverse, return_counts, out0_meta, out1_meta, out2_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      
      at::Tensor out2_;
      if (at::functionalization::impl::isFunctionalTensor(out2)) {
        at::functionalization::impl::sync(out2);
        out2_ = at::functionalization::impl::from_functional_tensor(out2);
      } else {
        out2_ = out2;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::unique_dim_consecutive_out::call(self_, dim, return_inverse, return_counts, out0_, out1_, out2_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::unique_dim_consecutive::call(self_, dim, return_inverse, return_counts);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        auto out2_inner = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(out2);
        at::functionalization::impl::sync(out2);
        auto out2_inner_updated = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::propagate_xla_data_direct(out2_inner, out2_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _unique2_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool sorted, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        auto out2_meta = to_meta(out2);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_unique2_out::call(self_meta, sorted, return_inverse, return_counts, out0_meta, out1_meta, out2_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      
      at::Tensor out2_;
      if (at::functionalization::impl::isFunctionalTensor(out2)) {
        at::functionalization::impl::sync(out2);
        out2_ = at::functionalization::impl::from_functional_tensor(out2);
      } else {
        out2_ = out2;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::_unique2_out::call(self_, sorted, return_inverse, return_counts, out0_, out1_, out2_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_unique2::call(self_, sorted, return_inverse, return_counts);
        }
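        // Write the functional results back into the out arguments: replace_() swaps in the freshly
        // computed value, commit_update() records the mutation on the FunctionalTensorWrapper,
        // sync() applies any pending updates, and propagate_xla_data_direct() forwards XLA-specific
        // metadata from the previous inner tensor to the updated one.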
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        auto out2_inner = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(out2);
        at::functionalization::impl::sync(out2);
        auto out2_inner_updated = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::propagate_xla_data_direct(out2_inner, out2_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
      }
    }

    at::Tensor & _unsafe_view_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_unsafe_view_out::call(self_meta, size, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_unsafe_view_out::call(self_, size, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_unsafe_view::call(self_, size);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
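    // NOTE [functionalization out= kernel template]
    // Every out= kernel in this file follows the same generated template (a sketch of the pattern,
    // not code that is emitted anywhere): (1) optionally replay the op on meta tensors to surface
    // shape errors early, (2) sync() and unwrap each functional argument to its inner tensor,
    // (3) if the out tensors are not functional, either assert (functional inputs mutating a
    // non-functional out) or simply redispatch the original out= op, and (4) otherwise run the
    // purely functional variant and write the result back into the out tensors as shown above.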

    at::Tensor & var_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::var_out::call(self_meta, dim, unbiased, keepdim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::var_out::call(self_, dim, unbiased, keepdim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::var_dim::call(self_, dim, unbiased, keepdim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & var_out_correction_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::var_correction_out::call(self_meta, dim, correction, keepdim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::var_correction_out::call(self_, dim, correction, keepdim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::var_correction::call(self_, dim, correction, keepdim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & var_out_names_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::var_names_out::call(self_meta, dim, unbiased, keepdim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::var_names_out::call(self_, dim, unbiased, keepdim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::var_names_dim::call(self_, dim, unbiased, keepdim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & var_out_correction_names_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, const ::std::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::var_correction_names_out::call(self_meta, dim, correction, keepdim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::var_correction_names_out::call(self_, dim, correction, keepdim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::var_correction_names::call(self_, dim, correction, keepdim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> var_mean_out_correction_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out0, at::Tensor & out1) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::var_mean_correction_out::call(self_meta, dim, correction, keepdim, out0_meta, out1_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::var_mean_correction_out::call(self_, dim, correction, keepdim, out0_, out1_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::var_mean_correction::call(self_, dim, correction, keepdim);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
      }
    }

    at::Tensor & where_out_self_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto condition_meta = to_meta(condition);
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::where_self_out::call(condition_meta, self_meta, other_meta, out_meta);
      }
      
      at::Tensor condition_;
      if (at::functionalization::impl::isFunctionalTensor(condition)) {
        at::functionalization::impl::sync(condition);
        condition_ = at::functionalization::impl::from_functional_tensor(condition);
      } else {
        condition_ = condition;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || condition.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(condition) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::where_self_out::call(condition_, self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::where_self::call(condition_, self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> _weight_norm_interface_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & v, const at::Tensor & g, int64_t dim, at::Tensor & out0, at::Tensor & out1) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto v_meta = to_meta(v);
        auto g_meta = to_meta(g);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_weight_norm_interface_out::call(v_meta, g_meta, dim, out0_meta, out1_meta);
      }
      
      at::Tensor v_;
      if (at::functionalization::impl::isFunctionalTensor(v)) {
        at::functionalization::impl::sync(v);
        v_ = at::functionalization::impl::from_functional_tensor(v);
      } else {
        v_ = v;
      }
      
      at::Tensor g_;
      if (at::functionalization::impl::isFunctionalTensor(g)) {
        at::functionalization::impl::sync(g);
        g_ = at::functionalization::impl::from_functional_tensor(g);
      } else {
        g_ = g;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || v.device().type() == c10::DeviceType::XLA || g.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(v) || at::functionalization::impl::isFunctionalTensor(g))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::_weight_norm_interface_out::call(v_, g_, dim, out0_, out1_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_weight_norm_interface::call(v_, g_, dim);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> _weight_norm_interface_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim, at::Tensor & out0, at::Tensor & out1) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_w_meta = to_meta(grad_w);
        auto saved_v_meta = to_meta(saved_v);
        auto saved_g_meta = to_meta(saved_g);
        auto saved_norms_meta = to_meta(saved_norms);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_weight_norm_interface_backward_out::call(grad_w_meta, saved_v_meta, saved_g_meta, saved_norms_meta, dim, out0_meta, out1_meta);
      }
      
      at::Tensor grad_w_;
      if (at::functionalization::impl::isFunctionalTensor(grad_w)) {
        at::functionalization::impl::sync(grad_w);
        grad_w_ = at::functionalization::impl::from_functional_tensor(grad_w);
      } else {
        grad_w_ = grad_w;
      }
      
      at::Tensor saved_v_;
      if (at::functionalization::impl::isFunctionalTensor(saved_v)) {
        at::functionalization::impl::sync(saved_v);
        saved_v_ = at::functionalization::impl::from_functional_tensor(saved_v);
      } else {
        saved_v_ = saved_v;
      }
      
      at::Tensor saved_g_;
      if (at::functionalization::impl::isFunctionalTensor(saved_g)) {
        at::functionalization::impl::sync(saved_g);
        saved_g_ = at::functionalization::impl::from_functional_tensor(saved_g);
      } else {
        saved_g_ = saved_g;
      }
      
      at::Tensor saved_norms_;
      if (at::functionalization::impl::isFunctionalTensor(saved_norms)) {
        at::functionalization::impl::sync(saved_norms);
        saved_norms_ = at::functionalization::impl::from_functional_tensor(saved_norms);
      } else {
        saved_norms_ = saved_norms;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_w.device().type() == c10::DeviceType::XLA || saved_v.device().type() == c10::DeviceType::XLA || saved_g.device().type() == c10::DeviceType::XLA || saved_norms.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_w) || at::functionalization::impl::isFunctionalTensor(saved_v) || at::functionalization::impl::isFunctionalTensor(saved_g) || at::functionalization::impl::isFunctionalTensor(saved_norms))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::_weight_norm_interface_backward_out::call(grad_w_, saved_v_, saved_g_, saved_norms_, dim, out0_, out1_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_weight_norm_interface_backward::call(grad_w_, saved_v_, saved_g_, saved_norms_, dim);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
      }
    }

    at::Tensor & zeros_out_names_out(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, ::std::optional<at::DimnameList> names, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::zeros_names_out::call(size, names, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::zeros_names_out::call(size, names, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
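          // For factory ops like zeros.names, the functional call rebuilds the TensorOptions
          // (dtype, layout, device) from the existing out tensor; pin_memory is left unset.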
          tmp_output = at::_ops::zeros_names::call(size, names, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _efficientzerotensor_out_out(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_efficientzerotensor_out::call(size, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_efficientzerotensor_out::call(size, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_efficientzerotensor::call(size, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & zeros_out_out(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::zeros_out::call(size, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::zeros_out::call(size, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::zeros::call(size, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & zeros_like_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::zeros_like_out::call(self_meta, memory_format, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::zeros_like_out::call(self_, memory_format, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::zeros_like::call(self_, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt, memory_format);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _standard_gamma_grad_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & output, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto output_meta = to_meta(output);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_standard_gamma_grad_out::call(self_meta, output_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor output_;
      if (at::functionalization::impl::isFunctionalTensor(output)) {
        at::functionalization::impl::sync(output);
        output_ = at::functionalization::impl::from_functional_tensor(output);
      } else {
        output_ = output;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || output.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(output))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_standard_gamma_grad_out::call(self_, output_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_standard_gamma_grad::call(self_, output_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _standard_gamma_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::Generator> generator, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_standard_gamma_out::call(self_meta, generator, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_standard_gamma_out::call(self_, generator, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_standard_gamma::call(self_, generator);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _dirichlet_grad_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto x_meta = to_meta(x);
        auto alpha_meta = to_meta(alpha);
        auto total_meta = to_meta(total);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_dirichlet_grad_out::call(x_meta, alpha_meta, total_meta, out_meta);
      }
      
      at::Tensor x_;
      if (at::functionalization::impl::isFunctionalTensor(x)) {
        at::functionalization::impl::sync(x);
        x_ = at::functionalization::impl::from_functional_tensor(x);
      } else {
        x_ = x;
      }
      
      at::Tensor alpha_;
      if (at::functionalization::impl::isFunctionalTensor(alpha)) {
        at::functionalization::impl::sync(alpha);
        alpha_ = at::functionalization::impl::from_functional_tensor(alpha);
      } else {
        alpha_ = alpha;
      }
      
      at::Tensor total_;
      if (at::functionalization::impl::isFunctionalTensor(total)) {
        at::functionalization::impl::sync(total);
        total_ = at::functionalization::impl::from_functional_tensor(total);
      } else {
        total_ = total;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || x.device().type() == c10::DeviceType::XLA || alpha.device().type() == c10::DeviceType::XLA || total.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(x) || at::functionalization::impl::isFunctionalTensor(alpha) || at::functionalization::impl::isFunctionalTensor(total))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_dirichlet_grad_out::call(x_, alpha_, total_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_dirichlet_grad::call(x_, alpha_, total_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _sample_dirichlet_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::Generator> generator, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_sample_dirichlet_out::call(self_meta, generator, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_sample_dirichlet_out::call(self_, generator, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_sample_dirichlet::call(self_, generator);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & poisson_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::Generator> generator, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::poisson_out::call(self_meta, generator, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::poisson_out::call(self_, generator, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::poisson::call(self_, generator);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & binomial_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & count, const at::Tensor & prob, ::std::optional<at::Generator> generator, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto count_meta = to_meta(count);
        auto prob_meta = to_meta(prob);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::binomial_out::call(count_meta, prob_meta, generator, out_meta);
      }
      
      at::Tensor count_;
      if (at::functionalization::impl::isFunctionalTensor(count)) {
        at::functionalization::impl::sync(count);
        count_ = at::functionalization::impl::from_functional_tensor(count);
      } else {
        count_ = count;
      }
      
      at::Tensor prob_;
      if (at::functionalization::impl::isFunctionalTensor(prob)) {
        at::functionalization::impl::sync(prob);
        prob_ = at::functionalization::impl::from_functional_tensor(prob);
      } else {
        prob_ = prob;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || count.device().type() == c10::DeviceType::XLA || prob.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(count) || at::functionalization::impl::isFunctionalTensor(prob))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::binomial_out::call(count_, prob_, generator, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::binomial::call(count_, prob_, generator);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & native_norm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & p, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::native_norm_out::call(self_meta, p, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::native_norm_out::call(self_, p, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::native_norm::call(self_, p);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & native_norm_out_ScalarOpt_dim_dtype_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::native_norm_ScalarOpt_dim_dtype_out::call(self_meta, p, dim, keepdim, dtype, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::native_norm_ScalarOpt_dim_dtype_out::call(self_, p, dim, keepdim, dtype, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::native_norm_ScalarOpt_dim_dtype::call(self_, p, dim, keepdim, dtype);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _batch_norm_with_update_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd, at::Tensor & reserve) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto weight_meta = to_meta(weight);
        auto bias_meta = to_meta(bias);
        auto running_mean_meta = to_meta(running_mean);
        auto running_var_meta = to_meta(running_var);
        auto out_meta = to_meta(out);
        auto save_mean_meta = to_meta(save_mean);
        auto save_invstd_meta = to_meta(save_invstd);
        auto reserve_meta = to_meta(reserve);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_batch_norm_with_update_out::call(input_meta, weight_meta, bias_meta, running_mean_meta, running_var_meta, momentum, eps, out_meta, save_mean_meta, save_invstd_meta, reserve_meta);
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      ::std::optional<at::Tensor> weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      ::std::optional<at::Tensor> bias_;
      if (at::functionalization::impl::isFunctionalTensor(bias)) {
        at::functionalization::impl::sync(bias);
        bias_ = at::functionalization::impl::from_functional_tensor(bias);
      } else {
        bias_ = bias;
      }
      
      at::Tensor running_mean_;
      if (at::functionalization::impl::isFunctionalTensor(running_mean)) {
        at::functionalization::impl::sync(running_mean);
        running_mean_ = at::functionalization::impl::from_functional_tensor(running_mean);
      } else {
        running_mean_ = running_mean;
      }
      
      at::Tensor running_var_;
      if (at::functionalization::impl::isFunctionalTensor(running_var)) {
        at::functionalization::impl::sync(running_var);
        running_var_ = at::functionalization::impl::from_functional_tensor(running_var);
      } else {
        running_var_ = running_var;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      
      at::Tensor save_mean_;
      if (at::functionalization::impl::isFunctionalTensor(save_mean)) {
        at::functionalization::impl::sync(save_mean);
        save_mean_ = at::functionalization::impl::from_functional_tensor(save_mean);
      } else {
        save_mean_ = save_mean;
      }
      
      at::Tensor save_invstd_;
      if (at::functionalization::impl::isFunctionalTensor(save_invstd)) {
        at::functionalization::impl::sync(save_invstd);
        save_invstd_ = at::functionalization::impl::from_functional_tensor(save_invstd);
      } else {
        save_invstd_ = save_invstd;
      }
      
      at::Tensor reserve_;
      if (at::functionalization::impl::isFunctionalTensor(reserve)) {
        at::functionalization::impl::sync(reserve);
        reserve_ = at::functionalization::impl::from_functional_tensor(reserve);
      } else {
        reserve_ = reserve;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(running_mean) && at::functionalization::impl::isFunctionalTensor(running_var) && at::functionalization::impl::isFunctionalTensor(out) && at::functionalization::impl::isFunctionalTensor(save_mean) && at::functionalization::impl::isFunctionalTensor(save_invstd) && at::functionalization::impl::isFunctionalTensor(reserve))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::_batch_norm_with_update_out::call(input_, weight_, bias_, running_mean_, running_var_, momentum, eps, out_, save_mean_, save_invstd_, reserve_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out, save_mean, save_invstd, reserve);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_batch_norm_with_update_functional::call(input_, weight_, bias_, running_mean_, running_var_, momentum, eps);
        }
        auto running_mean_inner = at::functionalization::impl::from_functional_tensor(running_mean);
        at::functionalization::impl::replace_(running_mean, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(running_mean);
        at::functionalization::impl::sync(running_mean);
        auto running_mean_inner_updated = at::functionalization::impl::from_functional_tensor(running_mean);
        at::functionalization::impl::propagate_xla_data_direct(running_mean_inner, running_mean_inner_updated);
        auto running_var_inner = at::functionalization::impl::from_functional_tensor(running_var);
        at::functionalization::impl::replace_(running_var, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(running_var);
        at::functionalization::impl::sync(running_var);
        auto running_var_inner_updated = at::functionalization::impl::from_functional_tensor(running_var);
        at::functionalization::impl::propagate_xla_data_direct(running_var_inner, running_var_inner_updated);
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        auto save_mean_inner = at::functionalization::impl::from_functional_tensor(save_mean);
        at::functionalization::impl::replace_(save_mean, std::get<3>(tmp_output));
        at::functionalization::impl::commit_update(save_mean);
        at::functionalization::impl::sync(save_mean);
        auto save_mean_inner_updated = at::functionalization::impl::from_functional_tensor(save_mean);
        at::functionalization::impl::propagate_xla_data_direct(save_mean_inner, save_mean_inner_updated);
        auto save_invstd_inner = at::functionalization::impl::from_functional_tensor(save_invstd);
        at::functionalization::impl::replace_(save_invstd, std::get<4>(tmp_output));
        at::functionalization::impl::commit_update(save_invstd);
        at::functionalization::impl::sync(save_invstd);
        auto save_invstd_inner_updated = at::functionalization::impl::from_functional_tensor(save_invstd);
        at::functionalization::impl::propagate_xla_data_direct(save_invstd_inner, save_invstd_inner_updated);
        auto reserve_inner = at::functionalization::impl::from_functional_tensor(reserve);
        at::functionalization::impl::replace_(reserve, std::get<5>(tmp_output));
        at::functionalization::impl::commit_update(reserve);
        at::functionalization::impl::sync(reserve);
        auto reserve_inner_updated = at::functionalization::impl::from_functional_tensor(reserve);
        at::functionalization::impl::propagate_xla_data_direct(reserve_inner, reserve_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out, save_mean, save_invstd, reserve);
      }
    }
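    // Editorial note (not emitted by torchgen): unlike the single-output kernels above, the
    // out= variant of _batch_norm_with_update mutates six tensors. When all of them are
    // functional, the kernel calls _batch_norm_with_update_functional, which returns a
    // six-element tuple, and commits the elements back in the order used above:
    // std::get<0>/<1> into running_mean/running_var and std::get<2>..<5> into
    // out, save_mean, save_invstd and reserve.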

    ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _batch_norm_with_update(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, double momentum, double eps) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto weight_meta = to_meta(weight);
        auto bias_meta = to_meta(bias);
        auto running_mean_meta = to_meta(running_mean);
        auto running_var_meta = to_meta(running_var);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_batch_norm_with_update::call(input_meta, weight_meta, bias_meta, running_mean_meta, running_var_meta, momentum, eps);
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      ::std::optional<at::Tensor> weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      ::std::optional<at::Tensor> bias_;
      if (at::functionalization::impl::isFunctionalTensor(bias)) {
        at::functionalization::impl::sync(bias);
        bias_ = at::functionalization::impl::from_functional_tensor(bias);
      } else {
        bias_ = bias;
      }
      
      at::Tensor running_mean_;
      if (at::functionalization::impl::isFunctionalTensor(running_mean)) {
        at::functionalization::impl::sync(running_mean);
        running_mean_ = at::functionalization::impl::from_functional_tensor(running_mean);
      } else {
        running_mean_ = running_mean;
      }
      
      at::Tensor running_var_;
      if (at::functionalization::impl::isFunctionalTensor(running_var)) {
        at::functionalization::impl::sync(running_var);
        running_var_ = at::functionalization::impl::from_functional_tensor(running_var);
      } else {
        running_var_ = running_var;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(running_mean) && at::functionalization::impl::isFunctionalTensor(running_var))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::_batch_norm_with_update::call(input_, weight_, bias_, running_mean_, running_var_, momentum, eps);
         return ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor>(std::get<0>(tmp_output), std::get<1>(tmp_output), std::get<2>(tmp_output), std::get<3>(tmp_output));
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_batch_norm_with_update_functional::call(input_, weight_, bias_, running_mean_, running_var_, momentum, eps);
        }
        auto output_0 = at::functionalization::impl::to_functional_tensor(std::get<0>(tmp_output));
        auto output_1 = at::functionalization::impl::to_functional_tensor(std::get<1>(tmp_output));
        auto output_2 = at::functionalization::impl::to_functional_tensor(std::get<2>(tmp_output));
        auto output_3 = at::functionalization::impl::to_functional_tensor(std::get<3>(tmp_output));
        auto running_mean_inner = at::functionalization::impl::from_functional_tensor(running_mean);
        at::functionalization::impl::replace_(running_mean, std::get<4>(tmp_output));
        at::functionalization::impl::commit_update(running_mean);
        at::functionalization::impl::sync(running_mean);
        auto running_mean_inner_updated = at::functionalization::impl::from_functional_tensor(running_mean);
        at::functionalization::impl::propagate_xla_data_direct(running_mean_inner, running_mean_inner_updated);
        auto running_var_inner = at::functionalization::impl::from_functional_tensor(running_var);
        at::functionalization::impl::replace_(running_var, std::get<5>(tmp_output));
        at::functionalization::impl::commit_update(running_var);
        at::functionalization::impl::sync(running_var);
        auto running_var_inner_updated = at::functionalization::impl::from_functional_tensor(running_var);
        at::functionalization::impl::propagate_xla_data_direct(running_var_inner, running_var_inner_updated);
        return ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor>(output_0, output_1, output_2, output_3);
      }
    }
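    // Editorial note (not emitted by torchgen): the non-out variant above only has
    // running_mean and running_var as mutable arguments. It therefore wraps the first four
    // results of _batch_norm_with_update_functional as fresh functional tensors
    // (output_0..output_3) and only commits std::get<4>/<5> back into the caller's
    // running_mean/running_var wrappers before returning the wrapped outputs.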

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _batch_norm_no_update_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, double momentum, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto weight_meta = to_meta(weight);
        auto bias_meta = to_meta(bias);
        auto running_mean_meta = to_meta(running_mean);
        auto running_var_meta = to_meta(running_var);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        auto out2_meta = to_meta(out2);
        auto out3_meta = to_meta(out3);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_batch_norm_no_update_out::call(input_meta, weight_meta, bias_meta, running_mean_meta, running_var_meta, momentum, eps, out0_meta, out1_meta, out2_meta, out3_meta);
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      ::std::optional<at::Tensor> weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      ::std::optional<at::Tensor> bias_;
      if (at::functionalization::impl::isFunctionalTensor(bias)) {
        at::functionalization::impl::sync(bias);
        bias_ = at::functionalization::impl::from_functional_tensor(bias);
      } else {
        bias_ = bias;
      }
      
      ::std::optional<at::Tensor> running_mean_;
      if (at::functionalization::impl::isFunctionalTensor(running_mean)) {
        at::functionalization::impl::sync(running_mean);
        running_mean_ = at::functionalization::impl::from_functional_tensor(running_mean);
      } else {
        running_mean_ = running_mean;
      }
      
      ::std::optional<at::Tensor> running_var_;
      if (at::functionalization::impl::isFunctionalTensor(running_var)) {
        at::functionalization::impl::sync(running_var);
        running_var_ = at::functionalization::impl::from_functional_tensor(running_var);
      } else {
        running_var_ = running_var;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      
      at::Tensor out2_;
      if (at::functionalization::impl::isFunctionalTensor(out2)) {
        at::functionalization::impl::sync(out2);
        out2_ = at::functionalization::impl::from_functional_tensor(out2);
      } else {
        out2_ = out2;
      }
      
      at::Tensor out3_;
      if (at::functionalization::impl::isFunctionalTensor(out3)) {
        at::functionalization::impl::sync(out3);
        out3_ = at::functionalization::impl::from_functional_tensor(out3);
      } else {
        out3_ = out3;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2) && at::functionalization::impl::isFunctionalTensor(out3))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias) || at::functionalization::impl::isFunctionalTensor(running_mean) || at::functionalization::impl::isFunctionalTensor(running_var))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::_batch_norm_no_update_out::call(input_, weight_, bias_, running_mean_, running_var_, momentum, eps, out0_, out1_, out2_, out3_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_batch_norm_no_update::call(input_, weight_, bias_, running_mean_, running_var_, momentum, eps);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        auto out2_inner = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(out2);
        at::functionalization::impl::sync(out2);
        auto out2_inner_updated = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::propagate_xla_data_direct(out2_inner, out2_inner_updated);
        auto out3_inner = at::functionalization::impl::from_functional_tensor(out3);
        at::functionalization::impl::replace_(out3, std::get<3>(tmp_output));
        at::functionalization::impl::commit_update(out3);
        at::functionalization::impl::sync(out3);
        auto out3_inner_updated = at::functionalization::impl::from_functional_tensor(out3);
        at::functionalization::impl::propagate_xla_data_direct(out3_inner, out3_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3);
      }
    }
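    // Editorial note (not emitted by torchgen): _batch_norm_no_update takes running_mean and
    // running_var as read-only optional inputs, so only the four out tensors participate in
    // the functional-tensor check and write-back above; the running stats are never mutated
    // by this kernel.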

    at::Tensor & _sparse_sum_out_dim_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_sparse_sum_dim_out::call(self_meta, dim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_sparse_sum_dim_out::call(self_, dim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_sparse_sum_dim::call(self_, dim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _sparse_sum_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_meta = to_meta(grad);
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_sparse_sum_backward_out::call(grad_meta, self_meta, dim, out_meta);
      }
      
      at::Tensor grad_;
      if (at::functionalization::impl::isFunctionalTensor(grad)) {
        at::functionalization::impl::sync(grad);
        grad_ = at::functionalization::impl::from_functional_tensor(grad);
      } else {
        grad_ = grad;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad) || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_sparse_sum_backward_out::call(grad_, self_, dim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_sparse_sum_backward::call(grad_, self_, dim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _sparse_csr_sum_out_dim_dtype_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_sparse_csr_sum_dim_dtype_out::call(self_meta, dim, keepdim, dtype, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_sparse_csr_sum_dim_dtype_out::call(self_, dim, keepdim, dtype, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_sparse_csr_sum_dim_dtype::call(self_, dim, keepdim, dtype);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _sparse_csr_prod_out_dim_dtype_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_sparse_csr_prod_dim_dtype_out::call(self_meta, dim, keepdim, dtype, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_sparse_csr_prod_dim_dtype_out::call(self_, dim, keepdim, dtype, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_sparse_csr_prod_dim_dtype::call(self_, dim, keepdim, dtype);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _sparse_softmax_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_sparse_softmax_out::call(self_meta, dim, half_to_float, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_sparse_softmax_out::call(self_, dim, half_to_float, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_sparse_softmax::call(self_, dim, half_to_float);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _sparse_softmax_backward_data_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto output_meta = to_meta(output);
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_sparse_softmax_backward_data_out::call(grad_output_meta, output_meta, dim, self_meta, out_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor output_;
      if (at::functionalization::impl::isFunctionalTensor(output)) {
        at::functionalization::impl::sync(output);
        output_ = at::functionalization::impl::from_functional_tensor(output);
      } else {
        output_ = output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(output) || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_sparse_softmax_backward_data_out::call(grad_output_, output_, dim, self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_sparse_softmax_backward_data::call(grad_output_, output_, dim, self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
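
    // ---------------------------------------------------------------------------
    // Commentary (not emitted by torchgen): every out= kernel in this section follows the
    // same shape as _sparse_softmax_backward_data_out_out above:
    //   1. (optionally) replay the op on meta tensors to surface shape errors early;
    //   2. unwrap any FunctionalTensorWrapper arguments via from_functional_tensor();
    //   3. if `out` is not a functional tensor, either error (functional inputs mutating a
    //      non-functional output) or simply redispatch to the original out= op;
    //   4. otherwise call the functional variant and write the result back into the
    //      wrapper with replace_/commit_update/sync, then propagate XLA data.
    // A minimal caller-side sketch, assuming at::functionalization::impl::to_functional_tensor
    // from <ATen/FunctionalTensorWrapper.h> (not used in this file) behaves as its name suggests:
    //
    //   at::Tensor base = at::zeros({4});
    //   at::Tensor out = at::functionalization::impl::to_functional_tensor(base);
    //   // Dispatching an out= op on `out` lands in a kernel like the one above; the
    //   // mutation is recorded on the wrapper rather than performed on `base` directly.
    //   at::functionalization::impl::sync(out);
    //   at::Tensor value = at::functionalization::impl::from_functional_tensor(out);
    // ---------------------------------------------------------------------------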

    at::Tensor & _sparse_log_softmax_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_sparse_log_softmax_out::call(self_meta, dim, half_to_float, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_sparse_log_softmax_out::call(self_, dim, half_to_float, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_sparse_log_softmax::call(self_, dim, half_to_float);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _sparse_log_softmax_backward_data_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto output_meta = to_meta(output);
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_sparse_log_softmax_backward_data_out::call(grad_output_meta, output_meta, dim, self_meta, out_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor output_;
      if (at::functionalization::impl::isFunctionalTensor(output)) {
        at::functionalization::impl::sync(output);
        output_ = at::functionalization::impl::from_functional_tensor(output);
      } else {
        output_ = output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(output) || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_sparse_log_softmax_backward_data_out::call(grad_output_, output_, dim, self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_sparse_log_softmax_backward_data::call(grad_output_, output_, dim, self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _spdiags_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & diagonals, const at::Tensor & offsets, at::IntArrayRef shape, ::std::optional<at::Layout> layout, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto diagonals_meta = to_meta(diagonals);
        auto offsets_meta = to_meta(offsets);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_spdiags_out::call(diagonals_meta, offsets_meta, shape, layout, out_meta);
      }
      
      at::Tensor diagonals_;
      if (at::functionalization::impl::isFunctionalTensor(diagonals)) {
        at::functionalization::impl::sync(diagonals);
        diagonals_ = at::functionalization::impl::from_functional_tensor(diagonals);
      } else {
        diagonals_ = diagonals;
      }
      
      at::Tensor offsets_;
      if (at::functionalization::impl::isFunctionalTensor(offsets)) {
        at::functionalization::impl::sync(offsets);
        offsets_ = at::functionalization::impl::from_functional_tensor(offsets);
      } else {
        offsets_ = offsets;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || diagonals.device().type() == c10::DeviceType::XLA || offsets.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(diagonals) || at::functionalization::impl::isFunctionalTensor(offsets))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_spdiags_out::call(diagonals_, offsets_, shape, layout, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_spdiags::call(diagonals_, offsets_, shape, layout);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & norm_out_ScalarOpt_dtype_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::ScalarType dtype, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::norm_ScalarOpt_dtype_out::call(self_meta, p, dtype, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::norm_ScalarOpt_dtype_out::call(self_, p, dtype, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::norm_ScalarOpt_dtype::call(self_, p, dtype);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & norm_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & p, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::norm_Scalar_out::call(self_meta, p, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::norm_Scalar_out::call(self_, p, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::norm_Scalar::call(self_, p);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & norm_out_dtype_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::norm_dtype_out::call(self_meta, p, dim, keepdim, dtype, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::norm_dtype_out::call(self_, p, dim, keepdim, dtype, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::norm_ScalarOpt_dim_dtype::call(self_, p, dim, keepdim, dtype);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & norm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::norm_out::call(self_meta, p, dim, keepdim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::norm_out::call(self_, p, dim, keepdim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::norm_ScalarOpt_dim::call(self_, p, dim, keepdim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & norm_out_names_dtype_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::norm_names_dtype_out::call(self_meta, p, dim, keepdim, dtype, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::norm_names_dtype_out::call(self_, p, dim, keepdim, dtype, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::norm_names_ScalarOpt_dim_dtype::call(self_, p, dim, keepdim, dtype);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & norm_out_names_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::norm_names_out::call(self_meta, p, dim, keepdim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::norm_names_out::call(self_, p, dim, keepdim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::norm_names_ScalarOpt_dim::call(self_, p, dim, keepdim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> frexp_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & mantissa, at::Tensor & exponent) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto mantissa_meta = to_meta(mantissa);
        auto exponent_meta = to_meta(exponent);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::frexp_Tensor_out::call(self_meta, mantissa_meta, exponent_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor mantissa_;
      if (at::functionalization::impl::isFunctionalTensor(mantissa)) {
        at::functionalization::impl::sync(mantissa);
        mantissa_ = at::functionalization::impl::from_functional_tensor(mantissa);
      } else {
        mantissa_ = mantissa;
      }
      
      at::Tensor exponent_;
      if (at::functionalization::impl::isFunctionalTensor(exponent)) {
        at::functionalization::impl::sync(exponent);
        exponent_ = at::functionalization::impl::from_functional_tensor(exponent);
      } else {
        exponent_ = exponent;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(mantissa) && at::functionalization::impl::isFunctionalTensor(exponent))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::frexp_Tensor_out::call(self_, mantissa_, exponent_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(mantissa, exponent);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::frexp_Tensor::call(self_);
        }
        auto mantissa_inner = at::functionalization::impl::from_functional_tensor(mantissa);
        at::functionalization::impl::replace_(mantissa, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(mantissa);
        at::functionalization::impl::sync(mantissa);
        auto mantissa_inner_updated = at::functionalization::impl::from_functional_tensor(mantissa);
        at::functionalization::impl::propagate_xla_data_direct(mantissa_inner, mantissa_inner_updated);
        auto exponent_inner = at::functionalization::impl::from_functional_tensor(exponent);
        at::functionalization::impl::replace_(exponent, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(exponent);
        at::functionalization::impl::sync(exponent);
        auto exponent_inner_updated = at::functionalization::impl::from_functional_tensor(exponent);
        at::functionalization::impl::propagate_xla_data_direct(exponent_inner, exponent_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(mantissa, exponent);
      }
    }
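
    // Commentary (not emitted by torchgen): frexp_out_Tensor_out above is the multi-output
    // form of the same pattern. Each output wrapper (mantissa, exponent) gets its own
    // replace_/commit_update/sync/propagate_xla_data_direct sequence, fed from the matching
    // element of the functional op's tuple result, and the returned tuple re-references the
    // original wrapper arguments rather than the temporaries.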

    at::Tensor & frobenius_norm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::frobenius_norm_out::call(self_meta, dim, keepdim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::frobenius_norm_out::call(self_, dim, keepdim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::frobenius_norm_dim::call(self_, dim, keepdim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & nuclear_norm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool keepdim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::nuclear_norm_out::call(self_meta, keepdim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::nuclear_norm_out::call(self_, keepdim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::nuclear_norm::call(self_, keepdim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & nuclear_norm_out_dim_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::nuclear_norm_dim_out::call(self_meta, dim, keepdim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::nuclear_norm_dim_out::call(self_, dim, keepdim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::nuclear_norm_dim::call(self_, dim, keepdim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & clone_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::clone_out::call(self_meta, memory_format, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::clone_out::call(self_, memory_format, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::clone::call(self_, memory_format);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    const at::Tensor & resize_as_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & the_template, ::std::optional<at::MemoryFormat> memory_format, const at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto the_template_meta = to_meta(the_template);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::resize_as_out::call(self_meta, the_template_meta, memory_format, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor the_template_;
      if (at::functionalization::impl::isFunctionalTensor(the_template)) {
        at::functionalization::impl::sync(the_template);
        the_template_ = at::functionalization::impl::from_functional_tensor(the_template);
      } else {
        the_template_ = the_template;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || the_template.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(the_template))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::resize_as_out::call(self_, the_template_, memory_format, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::resize_as::call(self_, the_template_, memory_format);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
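
    // Commentary (not emitted by torchgen): resize_as_out_out above takes and returns a
    // `const at::Tensor &` for `out`, unlike the other out= kernels in this section, which
    // use a mutable `at::Tensor &`; the body otherwise follows the identical pattern.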

    const at::Tensor & resize_as_(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & the_template, ::std::optional<at::MemoryFormat> memory_format) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto the_template_meta = to_meta(the_template);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::resize_as_::call(self_meta, the_template_meta, memory_format);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor the_template_;
      if (at::functionalization::impl::isFunctionalTensor(the_template)) {
        at::functionalization::impl::sync(the_template);
        the_template_ = at::functionalization::impl::from_functional_tensor(the_template);
      } else {
        the_template_ = the_template;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || the_template.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(the_template))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::resize_as_::call(self_, the_template_, memory_format);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::resize_as::call(self_, the_template_, memory_format);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
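
    // Commentary (not emitted by torchgen): resize_as_ above is the in-place form of the
    // pattern. Unlike the out= kernels, the meta reference replay is enabled (the guard is
    // `true && ...`) because the op mutates `self` directly, and the write-back targets
    // `self` rather than `out`. The functional fallback is resize_as(), so under
    // functionalization the resize is modeled as producing a fresh tensor that then replaces
    // self's inner value via replace_/commit_update.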

    const at::Tensor & resize_as_sparse_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & the_template, const at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto the_template_meta = to_meta(the_template);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::resize_as_sparse_out::call(self_meta, the_template_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor the_template_;
      if (at::functionalization::impl::isFunctionalTensor(the_template)) {
        at::functionalization::impl::sync(the_template);
        the_template_ = at::functionalization::impl::from_functional_tensor(the_template);
      } else {
        the_template_ = the_template;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || the_template.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(the_template))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::resize_as_sparse_out::call(self_, the_template_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::resize_as_sparse::call(self_, the_template_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    const at::Tensor & resize_as_sparse_(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & the_template) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto the_template_meta = to_meta(the_template);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::resize_as_sparse_::call(self_meta, the_template_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor the_template_;
      if (at::functionalization::impl::isFunctionalTensor(the_template)) {
        at::functionalization::impl::sync(the_template);
        the_template_ = at::functionalization::impl::from_functional_tensor(the_template);
      } else {
        the_template_ = the_template;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || the_template.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(the_template))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::resize_as_sparse_::call(self_, the_template_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::resize_as_sparse::call(self_, the_template_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & zero_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::zero_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::zero_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::zero::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & zero_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::zero_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
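        // zero_ takes no tensor arguments other than self, so this condition is constant-folded
        // to false and we always fall through to case 2 (redispatch) on this path.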
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::zero_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::zero::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
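
    // NOTE [Write-back epilogue]
    // The tail of the functional-tensor branch above (and of every kernel below) writes the
    // freshly computed value back into the mutated argument's wrapper: replace_() swaps in
    // the new inner tensor, commit_update() records the mutation, sync() propagates it, and
    // propagate_xla_data_direct() carries XLA-specific data from the old inner tensor to the
    // updated one. This is a rough reading of the call names; FunctionalTensorWrapper.h has
    // the precise semantics.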

    at::Tensor & sub_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::sub_out::call(self_meta, other_meta, alpha, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::sub_out::call(self_, other_, alpha, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::sub_Tensor::call(self_, other_, alpha);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
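
    // As a rough illustration (not code emitted by the generator), a traced call such as
    //
    //   at::sub_out(out, self, other, alpha);            // mutates `out`
    //
    // is rewritten by the kernel above into, effectively,
    //
    //   at::Tensor tmp = at::sub(self, other, alpha);    // purely functional compute
    //   /* write `tmp` back into the FunctionalTensorWrapper of `out` */
    //
    // so downstream transforms only ever see the mutation-free form.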

    at::Tensor & sub__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
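        // For example, with self of shape [2] and other of shape [3, 2], the in-place sub_
        // fails (the broadcast result does not fit into self) while the functional sub would
        // broadcast; running the meta call first preserves that error.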
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::sub__Tensor::call(self_meta, other_meta, alpha);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::sub__Tensor::call(self_, other_, alpha);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::sub_Tensor::call(self_, other_, alpha);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & sub_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::sub_Scalar_out::call(self_meta, other, alpha, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::sub_Scalar_out::call(self_, other, alpha, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::sub_Scalar::call(self_, other, alpha);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & sub__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::sub__Scalar::call(self_meta, other, alpha);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
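        // sub_.Scalar takes no tensor arguments other than self, so this condition is
        // constant-folded to false and we always fall through to case 2 (redispatch).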
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::sub__Scalar::call(self_, other, alpha);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::sub_Scalar::call(self_, other, alpha);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
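
    // `subtract` is an alias of `sub`, so the subtract kernels below mirror the sub kernels
    // above and differ only in the ops they redispatch to.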

    at::Tensor & subtract_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::subtract_out::call(self_meta, other_meta, alpha, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::subtract_out::call(self_, other_, alpha, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::subtract_Tensor::call(self_, other_, alpha);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & subtract__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::subtract__Tensor::call(self_meta, other_meta, alpha);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::subtract__Tensor::call(self_, other_, alpha);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::subtract_Tensor::call(self_, other_, alpha);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & rsub_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::rsub_Tensor_out::call(self_meta, other_meta, alpha, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::rsub_Tensor_out::call(self_, other_, alpha, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::rsub_Tensor::call(self_, other_, alpha);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & heaviside_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & values, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto values_meta = to_meta(values);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::heaviside_out::call(self_meta, values_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor values_;
      if (at::functionalization::impl::isFunctionalTensor(values)) {
        at::functionalization::impl::sync(values);
        values_ = at::functionalization::impl::from_functional_tensor(values);
      } else {
        values_ = values;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || values.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(values))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::heaviside_out::call(self_, values_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::heaviside::call(self_, values_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & heaviside_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & values) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto values_meta = to_meta(values);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::heaviside_::call(self_meta, values_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor values_;
      if (at::functionalization::impl::isFunctionalTensor(values)) {
        at::functionalization::impl::sync(values);
        values_ = at::functionalization::impl::from_functional_tensor(values);
      } else {
        values_ = values;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || values.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(values))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::heaviside_::call(self_, values_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::heaviside::call(self_, values_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & rsub_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::rsub_Scalar_out::call(self_meta, other, alpha, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::rsub_Scalar_out::call(self_, other, alpha, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::rsub_Scalar::call(self_, other, alpha);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _sparse_addmm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto mat1_meta = to_meta(mat1);
        auto mat2_meta = to_meta(mat2);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_sparse_addmm_out::call(self_meta, mat1_meta, mat2_meta, beta, alpha, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor mat1_;
      if (at::functionalization::impl::isFunctionalTensor(mat1)) {
        at::functionalization::impl::sync(mat1);
        mat1_ = at::functionalization::impl::from_functional_tensor(mat1);
      } else {
        mat1_ = mat1;
      }
      
      at::Tensor mat2_;
      if (at::functionalization::impl::isFunctionalTensor(mat2)) {
        at::functionalization::impl::sync(mat2);
        mat2_ = at::functionalization::impl::from_functional_tensor(mat2);
      } else {
        mat2_ = mat2;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || mat1.device().type() == c10::DeviceType::XLA || mat2.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(mat1) || at::functionalization::impl::isFunctionalTensor(mat2))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_sparse_addmm_out::call(self_, mat1_, mat2_, beta, alpha, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_sparse_addmm::call(self_, mat1_, mat2_, beta, alpha);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & sparse_sampled_addmm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto mat1_meta = to_meta(mat1);
        auto mat2_meta = to_meta(mat2);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::sparse_sampled_addmm_out::call(self_meta, mat1_meta, mat2_meta, beta, alpha, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor mat1_;
      if (at::functionalization::impl::isFunctionalTensor(mat1)) {
        at::functionalization::impl::sync(mat1);
        mat1_ = at::functionalization::impl::from_functional_tensor(mat1);
      } else {
        mat1_ = mat1;
      }
      
      at::Tensor mat2_;
      if (at::functionalization::impl::isFunctionalTensor(mat2)) {
        at::functionalization::impl::sync(mat2);
        mat2_ = at::functionalization::impl::from_functional_tensor(mat2);
      } else {
        mat2_ = mat2;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || mat1.device().type() == c10::DeviceType::XLA || mat2.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(mat1) || at::functionalization::impl::isFunctionalTensor(mat2))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::sparse_sampled_addmm_out::call(self_, mat1_, mat2_, beta, alpha, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::sparse_sampled_addmm::call(self_, mat1_, mat2_, beta, alpha);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & addmm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto mat1_meta = to_meta(mat1);
        auto mat2_meta = to_meta(mat2);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::addmm_out::call(self_meta, mat1_meta, mat2_meta, beta, alpha, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor mat1_;
      if (at::functionalization::impl::isFunctionalTensor(mat1)) {
        at::functionalization::impl::sync(mat1);
        mat1_ = at::functionalization::impl::from_functional_tensor(mat1);
      } else {
        mat1_ = mat1;
      }
      
      at::Tensor mat2_;
      if (at::functionalization::impl::isFunctionalTensor(mat2)) {
        at::functionalization::impl::sync(mat2);
        mat2_ = at::functionalization::impl::from_functional_tensor(mat2);
      } else {
        mat2_ = mat2;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || mat1.device().type() == c10::DeviceType::XLA || mat2.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(mat1) || at::functionalization::impl::isFunctionalTensor(mat2))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::addmm_out::call(self_, mat1_, mat2_, beta, alpha, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::addmm::call(self_, mat1_, mat2_, beta, alpha);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & addmm_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto mat1_meta = to_meta(mat1);
        auto mat2_meta = to_meta(mat2);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::addmm_::call(self_meta, mat1_meta, mat2_meta, beta, alpha);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor mat1_;
      if (at::functionalization::impl::isFunctionalTensor(mat1)) {
        at::functionalization::impl::sync(mat1);
        mat1_ = at::functionalization::impl::from_functional_tensor(mat1);
      } else {
        mat1_ = mat1;
      }
      
      at::Tensor mat2_;
      if (at::functionalization::impl::isFunctionalTensor(mat2)) {
        at::functionalization::impl::sync(mat2);
        mat2_ = at::functionalization::impl::from_functional_tensor(mat2);
      } else {
        mat2_ = mat2;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || mat1.device().type() == c10::DeviceType::XLA || mat2.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(mat1) || at::functionalization::impl::isFunctionalTensor(mat2))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::addmm_::call(self_, mat1_, mat2_, beta, alpha);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::addmm::call(self_, mat1_, mat2_, beta, alpha);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
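
    // Likewise for the in-place variant above (a sketch of the effect, not generator output):
    //
    //   self.addmm_(mat1, mat2, beta, alpha);
    //
    // becomes, roughly,
    //
    //   at::Tensor tmp = at::addmm(self, mat1, mat2, beta, alpha);
    //   /* write `tmp` back into the FunctionalTensorWrapper of `self` */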

    at::Tensor & _addmm_activation_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto mat1_meta = to_meta(mat1);
        auto mat2_meta = to_meta(mat2);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_addmm_activation_out::call(self_meta, mat1_meta, mat2_meta, beta, alpha, use_gelu, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor mat1_;
      if (at::functionalization::impl::isFunctionalTensor(mat1)) {
        at::functionalization::impl::sync(mat1);
        mat1_ = at::functionalization::impl::from_functional_tensor(mat1);
      } else {
        mat1_ = mat1;
      }
      
      at::Tensor mat2_;
      if (at::functionalization::impl::isFunctionalTensor(mat2)) {
        at::functionalization::impl::sync(mat2);
        mat2_ = at::functionalization::impl::from_functional_tensor(mat2);
      } else {
        mat2_ = mat2;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || mat1.device().type() == c10::DeviceType::XLA || mat2.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(mat1) || at::functionalization::impl::isFunctionalTensor(mat2))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_addmm_activation_out::call(self_, mat1_, mat2_, beta, alpha, use_gelu, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_addmm_activation::call(self_, mat1_, mat2_, beta, alpha, use_gelu);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _scaled_mm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2, const at::Tensor & scale_a, const at::Tensor & scale_b, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & scale_result, ::std::optional<at::ScalarType> out_dtype, bool use_fast_accum, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto mat2_meta = to_meta(mat2);
        auto scale_a_meta = to_meta(scale_a);
        auto scale_b_meta = to_meta(scale_b);
        auto bias_meta = to_meta(bias);
        auto scale_result_meta = to_meta(scale_result);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_scaled_mm_out::call(self_meta, mat2_meta, scale_a_meta, scale_b_meta, bias_meta, scale_result_meta, out_dtype, use_fast_accum, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor mat2_;
      if (at::functionalization::impl::isFunctionalTensor(mat2)) {
        at::functionalization::impl::sync(mat2);
        mat2_ = at::functionalization::impl::from_functional_tensor(mat2);
      } else {
        mat2_ = mat2;
      }
      
      at::Tensor scale_a_;
      if (at::functionalization::impl::isFunctionalTensor(scale_a)) {
        at::functionalization::impl::sync(scale_a);
        scale_a_ = at::functionalization::impl::from_functional_tensor(scale_a);
      } else {
        scale_a_ = scale_a;
      }
      
      at::Tensor scale_b_;
      if (at::functionalization::impl::isFunctionalTensor(scale_b)) {
        at::functionalization::impl::sync(scale_b);
        scale_b_ = at::functionalization::impl::from_functional_tensor(scale_b);
      } else {
        scale_b_ = scale_b;
      }
      
      ::std::optional<at::Tensor> bias_;
      if (at::functionalization::impl::isFunctionalTensor(bias)) {
        at::functionalization::impl::sync(bias);
        bias_ = at::functionalization::impl::from_functional_tensor(bias);
      } else {
        bias_ = bias;
      }
      
      ::std::optional<at::Tensor> scale_result_;
      if (at::functionalization::impl::isFunctionalTensor(scale_result)) {
        at::functionalization::impl::sync(scale_result);
        scale_result_ = at::functionalization::impl::from_functional_tensor(scale_result);
      } else {
        scale_result_ = scale_result;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || mat2.device().type() == c10::DeviceType::XLA || scale_a.device().type() == c10::DeviceType::XLA || scale_b.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(mat2) || at::functionalization::impl::isFunctionalTensor(scale_a) || at::functionalization::impl::isFunctionalTensor(scale_b) || at::functionalization::impl::isFunctionalTensor(bias) || at::functionalization::impl::isFunctionalTensor(scale_result))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_scaled_mm_out::call(self_, mat2_, scale_a_, scale_b_, bias_, scale_result_, out_dtype, use_fast_accum, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_scaled_mm::call(self_, mat2_, scale_a_, scale_b_, bias_, scale_result_, out_dtype, use_fast_accum);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
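
    // Note [Optional tensor arguments under functionalization]
    // In the kernel above, `bias` and `scale_result` are ::std::optional<at::Tensor>,
    // and the same sync()/from_functional_tensor() helpers are applied to them as to
    // plain tensors; when the optional is empty, the unwrapped local simply stays
    // empty. They also participate in the "mutating a non-functional tensor with a
    // functional tensor" guard, which is why isFunctionalTensor(bias) and
    // isFunctionalTensor(scale_result) appear in the condition even though only
    // `out` is written to.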

    at::Tensor & sparse_coo_tensor_out_size_out(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::sparse_coo_tensor_size_out::call(size, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::sparse_coo_tensor_size_out::call(size, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::sparse_coo_tensor_size::call(size, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
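
    // Note [Functionalizing factory-style out= ops]
    // sparse_coo_tensor.size_out has no tensor inputs other than `out`, so the
    // XLA / functional-input guards above collapse to constants and case 1 is
    // unreachable. When `out` is a functional tensor, the kernel rebuilds the
    // result with the functional factory variant, recovering the TensorOptions
    // (dtype, layout, device) from `out_` itself and leaving pin_memory as
    // ::std::nullopt:
    //
    //   tmp_output = at::_ops::sparse_coo_tensor_size::call(
    //       size, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
    //
    // The _sparse_coo_tensor_with_dims* kernels below recover TensorOptions from
    // `out_` in the same way.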

    at::Tensor & _sparse_coo_tensor_with_dims_out_out(c10::DispatchKeySet dispatchKeySet, int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_sparse_coo_tensor_with_dims_out::call(sparse_dim, dense_dim, size, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_sparse_coo_tensor_with_dims_out::call(sparse_dim, dense_dim, size, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_sparse_coo_tensor_with_dims::call(sparse_dim, dense_dim, size, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_out_out(c10::DispatchKeySet dispatchKeySet, int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, ::std::optional<bool> is_coalesced, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto indices_meta = to_meta(indices);
        auto values_meta = to_meta(values);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_sparse_coo_tensor_with_dims_and_tensors_out::call(sparse_dim, dense_dim, size, indices_meta, values_meta, is_coalesced, out_meta);
      }
      
      at::Tensor indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      
      at::Tensor values_;
      if (at::functionalization::impl::isFunctionalTensor(values)) {
        at::functionalization::impl::sync(values);
        values_ = at::functionalization::impl::from_functional_tensor(values);
      } else {
        values_ = values;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || indices.device().type() == c10::DeviceType::XLA || values.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(indices) || at::functionalization::impl::isFunctionalTensor(values))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_sparse_coo_tensor_with_dims_and_tensors_out::call(sparse_dim, dense_dim, size, indices_, values_, is_coalesced, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_sparse_coo_tensor_with_dims_and_tensors::call(sparse_dim, dense_dim, size, indices_, values_, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt, is_coalesced);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    const at::Tensor & sparse_resize_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim, const at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::sparse_resize_out::call(self_meta, size, sparse_dim, dense_dim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::sparse_resize_out::call(self_, size, sparse_dim, dense_dim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::sparse_resize::call(self_, size, sparse_dim, dense_dim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    const at::Tensor & sparse_resize_(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::sparse_resize_::call(self_meta, size, sparse_dim, dense_dim);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::sparse_resize_::call(self_, size, sparse_dim, dense_dim);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::sparse_resize::call(self_, size, sparse_dim, dense_dim);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
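
    // Note [Functionalizing in-place ops - sparse_resize_ and friends]
    // For true in-place ops the codegen enables the meta-tensor reference run
    // ("if (true && !disable_meta_reference())") so shape errors surface before
    // the op is rewritten, and the mutated argument is `self` rather than `out`.
    // Because sparse_resize_ has no tensor inputs besides `self`, the
    // "if (!(false) && (false))" guard above can never take case 1; it is
    // evidently emitted only so every generated kernel shares the same structure.
    // When `self` is a functional tensor, the functional sparse_resize() runs and
    // its result is committed back into `self` through
    // replace_()/commit_update()/sync().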

    const at::Tensor & sparse_resize_and_clear_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim, const at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::sparse_resize_and_clear_out::call(self_meta, size, sparse_dim, dense_dim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::sparse_resize_and_clear_out::call(self_, size, sparse_dim, dense_dim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::sparse_resize_and_clear::call(self_, size, sparse_dim, dense_dim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    const at::Tensor & sparse_resize_and_clear_(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::sparse_resize_and_clear_::call(self_meta, size, sparse_dim, dense_dim);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::sparse_resize_and_clear_::call(self_, size, sparse_dim, dense_dim);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::sparse_resize_and_clear::call(self_, size, sparse_dim, dense_dim);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & sparse_mask_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto mask_meta = to_meta(mask);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::sparse_mask_out::call(self_meta, mask_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor mask_;
      if (at::functionalization::impl::isFunctionalTensor(mask)) {
        at::functionalization::impl::sync(mask);
        mask_ = at::functionalization::impl::from_functional_tensor(mask);
      } else {
        mask_ = mask;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || mask.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(mask))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::sparse_mask_out::call(self_, mask_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::sparse_mask::call(self_, mask_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _sparse_mask_projection_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, bool accumulate_matches, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto mask_meta = to_meta(mask);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_sparse_mask_projection_out::call(self_meta, mask_meta, accumulate_matches, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor mask_;
      if (at::functionalization::impl::isFunctionalTensor(mask)) {
        at::functionalization::impl::sync(mask);
        mask_ = at::functionalization::impl::from_functional_tensor(mask);
      } else {
        mask_ = mask;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || mask.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(mask))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_sparse_mask_projection_out::call(self_, mask_, accumulate_matches, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_sparse_mask_projection::call(self_, mask_, accumulate_matches);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _to_dense_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<bool> masked_grad, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_to_dense_out::call(self_meta, dtype, masked_grad, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_to_dense_out::call(self_, dtype, masked_grad, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_to_dense::call(self_, dtype, masked_grad);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _coalesce_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_coalesce_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_coalesce_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_coalesce::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _coalesced_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool coalesced, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_coalesced_out::call(self_meta, coalesced, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_coalesced_out::call(self_, coalesced, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_coalesced::call(self_, coalesced);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _coalesced_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, bool coalesced) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_coalesced_::call(self_meta, coalesced);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_coalesced_::call(self_, coalesced);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_coalesced::call(self_, coalesced);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & hspmm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & mat1, const at::Tensor & mat2, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto mat1_meta = to_meta(mat1);
        auto mat2_meta = to_meta(mat2);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::hspmm_out::call(mat1_meta, mat2_meta, out_meta);
      }
      
      at::Tensor mat1_;
      if (at::functionalization::impl::isFunctionalTensor(mat1)) {
        at::functionalization::impl::sync(mat1);
        mat1_ = at::functionalization::impl::from_functional_tensor(mat1);
      } else {
        mat1_ = mat1;
      }
      
      at::Tensor mat2_;
      if (at::functionalization::impl::isFunctionalTensor(mat2)) {
        at::functionalization::impl::sync(mat2);
        mat2_ = at::functionalization::impl::from_functional_tensor(mat2);
      } else {
        mat2_ = mat2;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || mat1.device().type() == c10::DeviceType::XLA || mat2.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(mat1) || at::functionalization::impl::isFunctionalTensor(mat2))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::hspmm_out::call(mat1_, mat2_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::hspmm::call(mat1_, mat2_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & copy_sparse_to_sparse_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, bool non_blocking, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto src_meta = to_meta(src);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::copy_sparse_to_sparse_out::call(self_meta, src_meta, non_blocking, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor src_;
      if (at::functionalization::impl::isFunctionalTensor(src)) {
        at::functionalization::impl::sync(src);
        src_ = at::functionalization::impl::from_functional_tensor(src);
      } else {
        src_ = src;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || src.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(src))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::copy_sparse_to_sparse_out::call(self_, src_, non_blocking, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::copy_sparse_to_sparse::call(self_, src_, non_blocking);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & copy_sparse_to_sparse_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & src, bool non_blocking) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto src_meta = to_meta(src);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::copy_sparse_to_sparse_::call(self_meta, src_meta, non_blocking);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor src_;
      if (at::functionalization::impl::isFunctionalTensor(src)) {
        at::functionalization::impl::sync(src);
        src_ = at::functionalization::impl::from_functional_tensor(src);
      } else {
        src_ = src;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || src.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(src))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::copy_sparse_to_sparse_::call(self_, src_, non_blocking);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::copy_sparse_to_sparse::call(self_, src_, non_blocking);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
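
    // Note [XLA carve-out in the mutation check]
    // copy_sparse_to_sparse_ shows the non-degenerate form of the guard used in
    // the in-place kernels: mutating a non-functional `self` with a functional
    // `src` is an error, except when `src` lives on an XLA device, mirroring the
    // "cpu_tensor.copy_(xla_tensor) is valid code" comment above. When `self` is
    // functional, the kernel instead calls the functional copy_sparse_to_sparse()
    // and commits the result back into `self`.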

    at::Tensor & _to_sparse_out_sparse_dim_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t sparse_dim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_to_sparse_sparse_dim_out::call(self_meta, sparse_dim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_to_sparse_sparse_dim_out::call(self_, sparse_dim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_to_sparse_sparse_dim::call(self_, sparse_dim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _to_sparse_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, ::std::optional<int64_t> dense_dim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_to_sparse_out::call(self_meta, layout, blocksize, dense_dim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_to_sparse_out::call(self_, layout, blocksize, dense_dim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_to_sparse::call(self_, layout, blocksize, dense_dim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _to_sparse_csr_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<int64_t> dense_dim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_to_sparse_csr_out::call(self_meta, dense_dim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_to_sparse_csr_out::call(self_, dense_dim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_to_sparse_csr::call(self_, dense_dim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _to_sparse_csc_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<int64_t> dense_dim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_to_sparse_csc_out::call(self_meta, dense_dim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_to_sparse_csc_out::call(self_, dense_dim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_to_sparse_csc::call(self_, dense_dim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _to_sparse_bsr_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef blocksize, ::std::optional<int64_t> dense_dim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_to_sparse_bsr_out::call(self_meta, blocksize, dense_dim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_to_sparse_bsr_out::call(self_, blocksize, dense_dim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_to_sparse_bsr::call(self_, blocksize, dense_dim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _to_sparse_bsc_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef blocksize, ::std::optional<int64_t> dense_dim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_to_sparse_bsc_out::call(self_meta, blocksize, dense_dim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_to_sparse_bsc_out::call(self_, blocksize, dense_dim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_to_sparse_bsc::call(self_, blocksize, dense_dim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & to_mkldnn_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::to_mkldnn_out::call(self_meta, dtype, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::to_mkldnn_out::call(self_, dtype, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::to_mkldnn::call(self_, dtype);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & mkldnn_reorder_conv2d_weight_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::OptionalSymIntArrayRef input_size, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::mkldnn_reorder_conv2d_weight_out::call(self_meta, padding, stride, dilation, groups, input_size, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::mkldnn_reorder_conv2d_weight_out::call(self_, padding, stride, dilation, groups, input_size, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::mkldnn_reorder_conv2d_weight::call(self_, padding, stride, dilation, groups, input_size);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & mkldnn_reorder_conv3d_weight_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::OptionalSymIntArrayRef input_size, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::mkldnn_reorder_conv3d_weight_out::call(self_meta, padding, stride, dilation, groups, input_size, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::mkldnn_reorder_conv3d_weight_out::call(self_, padding, stride, dilation, groups, input_size, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::mkldnn_reorder_conv3d_weight::call(self_, padding, stride, dilation, groups, input_size);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & quantize_per_tensor_dynamic_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype, bool reduce_range, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::quantize_per_tensor_dynamic_out::call(self_meta, dtype, reduce_range, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::quantize_per_tensor_dynamic_out::call(self_, dtype, reduce_range, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::quantize_per_tensor_dynamic::call(self_, dtype, reduce_range);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & quantize_per_tensor_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::quantize_per_tensor_out::call(self_meta, scale, zero_point, dtype, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::quantize_per_tensor_out::call(self_, scale, zero_point, dtype, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::quantize_per_tensor::call(self_, scale, zero_point, dtype);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & quantize_per_tensor_out_tensor_qparams_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto scale_meta = to_meta(scale);
        auto zero_point_meta = to_meta(zero_point);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::quantize_per_tensor_tensor_qparams_out::call(self_meta, scale_meta, zero_point_meta, dtype, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor scale_;
      if (at::functionalization::impl::isFunctionalTensor(scale)) {
        at::functionalization::impl::sync(scale);
        scale_ = at::functionalization::impl::from_functional_tensor(scale);
      } else {
        scale_ = scale;
      }
      
      at::Tensor zero_point_;
      if (at::functionalization::impl::isFunctionalTensor(zero_point)) {
        at::functionalization::impl::sync(zero_point);
        zero_point_ = at::functionalization::impl::from_functional_tensor(zero_point);
      } else {
        zero_point_ = zero_point;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
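      // With tensor-valued scale/zero_point, the safety check below spans every tensor
      // argument: mutating a non-functional `out` is only an error when at least one input
      // is a functional tensor and none of the inputs lives on an XLA device.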
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || scale.device().type() == c10::DeviceType::XLA || zero_point.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(scale) || at::functionalization::impl::isFunctionalTensor(zero_point))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::quantize_per_tensor_tensor_qparams_out::call(self_, scale_, zero_point_, dtype, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::quantize_per_tensor_tensor_qparams::call(self_, scale_, zero_point_, dtype);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    void quantize_per_tensor_out_tensors_out(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype, at::TensorList out) {
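      // TensorList variant: inputs and `out` are unwrapped element-wise into
      // ::std::vector<at::Tensor>, the kernel returns void, and the same
      // replace_/commit_update/sync sequence is applied to the whole output list.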
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto tensors_meta = to_meta(tensors);
        auto scales_meta = to_meta(scales);
        auto zero_points_meta = to_meta(zero_points);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::quantize_per_tensor_tensors_out::call(tensors_meta, scales_meta, zero_points_meta, dtype, out_meta);
      }
      
      ::std::vector<at::Tensor> tensors_;
      if (at::functionalization::impl::isFunctionalTensor(tensors)) {
        at::functionalization::impl::sync(tensors);
        tensors_ = at::functionalization::impl::from_functional_tensor(tensors);
      } else {
        tensors_ = tensors.vec();
      }
      
      at::Tensor scales_;
      if (at::functionalization::impl::isFunctionalTensor(scales)) {
        at::functionalization::impl::sync(scales);
        scales_ = at::functionalization::impl::from_functional_tensor(scales);
      } else {
        scales_ = scales;
      }
      
      at::Tensor zero_points_;
      if (at::functionalization::impl::isFunctionalTensor(zero_points)) {
        at::functionalization::impl::sync(zero_points);
        zero_points_ = at::functionalization::impl::from_functional_tensor(zero_points);
      } else {
        zero_points_ = zero_points;
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || scales.device().type() == c10::DeviceType::XLA || zero_points.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(tensors) || at::functionalization::impl::isFunctionalTensor(scales) || at::functionalization::impl::isFunctionalTensor(zero_points))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::quantize_per_tensor_tensors_out::call(tensors_, scales_, zero_points_, dtype, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::quantize_per_tensor_tensors::call(tensors_, scales_, zero_points_, dtype);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    at::Tensor & quantize_per_channel_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::ScalarType dtype, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto scales_meta = to_meta(scales);
        auto zero_points_meta = to_meta(zero_points);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::quantize_per_channel_out::call(self_meta, scales_meta, zero_points_meta, axis, dtype, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor scales_;
      if (at::functionalization::impl::isFunctionalTensor(scales)) {
        at::functionalization::impl::sync(scales);
        scales_ = at::functionalization::impl::from_functional_tensor(scales);
      } else {
        scales_ = scales;
      }
      
      at::Tensor zero_points_;
      if (at::functionalization::impl::isFunctionalTensor(zero_points)) {
        at::functionalization::impl::sync(zero_points);
        zero_points_ = at::functionalization::impl::from_functional_tensor(zero_points);
      } else {
        zero_points_ = zero_points;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || scales.device().type() == c10::DeviceType::XLA || zero_points.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(scales) || at::functionalization::impl::isFunctionalTensor(zero_points))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::quantize_per_channel_out::call(self_, scales_, zero_points_, axis, dtype, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::quantize_per_channel::call(self_, scales_, zero_points_, axis, dtype);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & dequantize_out_self_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::dequantize_self_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::dequantize_self_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::dequantize_self::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    void dequantize_out_tensors_out(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto tensors_meta = to_meta(tensors);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::dequantize_tensors_out::call(tensors_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> tensors_;
      if (at::functionalization::impl::isFunctionalTensor(tensors)) {
        at::functionalization::impl::sync(tensors);
        tensors_ = at::functionalization::impl::from_functional_tensor(tensors);
      } else {
        tensors_ = tensors.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
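      // Note: with only TensorList inputs there is no per-argument device check here, so
      // the XLA carve-out below collapses to `false` and a functional `tensors` paired
      // with a non-functional `out` is always reported as an error.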
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(tensors))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::dequantize_tensors_out::call(tensors_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::dequantize_tensors::call(tensors_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    at::Tensor & q_per_channel_scales_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::q_per_channel_scales_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::q_per_channel_scales_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::q_per_channel_scales::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & q_per_channel_zero_points_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::q_per_channel_zero_points_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::q_per_channel_zero_points_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::q_per_channel_zero_points::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & int_repr_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::int_repr_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::int_repr_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::int_repr::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _make_per_tensor_quantized_tensor_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double scale, int64_t zero_point, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_make_per_tensor_quantized_tensor_out::call(self_meta, scale, zero_point, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_make_per_tensor_quantized_tensor_out::call(self_, scale, zero_point, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_make_per_tensor_quantized_tensor::call(self_, scale, zero_point);
        }
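        // Write the functionally computed result back into the functional `out` wrapper:
        // replace its value, commit and sync the update, and propagate any XLA data from
        // the previous inner tensor to the updated one.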
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _make_per_channel_quantized_tensor_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto scale_meta = to_meta(scale);
        auto zero_point_meta = to_meta(zero_point);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_make_per_channel_quantized_tensor_out::call(self_meta, scale_meta, zero_point_meta, axis, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor scale_;
      if (at::functionalization::impl::isFunctionalTensor(scale)) {
        at::functionalization::impl::sync(scale);
        scale_ = at::functionalization::impl::from_functional_tensor(scale);
      } else {
        scale_ = scale;
      }
      
      at::Tensor zero_point_;
      if (at::functionalization::impl::isFunctionalTensor(zero_point)) {
        at::functionalization::impl::sync(zero_point);
        zero_point_ = at::functionalization::impl::from_functional_tensor(zero_point);
      } else {
        zero_point_ = zero_point;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || scale.device().type() == c10::DeviceType::XLA || zero_point.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(scale) || at::functionalization::impl::isFunctionalTensor(zero_point))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_make_per_channel_quantized_tensor_out::call(self_, scale_, zero_point_, axis, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_make_per_channel_quantized_tensor::call(self_, scale_, zero_point_, axis);
        }
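        // Write the functionally computed result back into the functional `out` wrapper:
        // replace its value, commit and sync the update, and propagate any XLA data from
        // the previous inner tensor to the updated one.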
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> fake_quantize_per_tensor_affine_cachemask_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::fake_quantize_per_tensor_affine_cachemask_out::call(self_meta, scale, zero_point, quant_min, quant_max, out0_meta, out1_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::fake_quantize_per_tensor_affine_cachemask_out::call(self_, scale, zero_point, quant_min, quant_max, out0_, out1_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::fake_quantize_per_tensor_affine_cachemask::call(self_, scale, zero_point, quant_min, quant_max);
        }
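        // Write the functionally computed results back into the functional out0/out1 wrappers:
        // replace each value, commit and sync the updates, and propagate any XLA data from
        // the previous inner tensors to the updated ones.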
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> _fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto scale_meta = to_meta(scale);
        auto zero_point_meta = to_meta(zero_point);
        auto fake_quant_enabled_meta = to_meta(fake_quant_enabled);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out::call(self_meta, scale_meta, zero_point_meta, fake_quant_enabled_meta, quant_min, quant_max, out0_meta, out1_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor scale_;
      if (at::functionalization::impl::isFunctionalTensor(scale)) {
        at::functionalization::impl::sync(scale);
        scale_ = at::functionalization::impl::from_functional_tensor(scale);
      } else {
        scale_ = scale;
      }
      
      at::Tensor zero_point_;
      if (at::functionalization::impl::isFunctionalTensor(zero_point)) {
        at::functionalization::impl::sync(zero_point);
        zero_point_ = at::functionalization::impl::from_functional_tensor(zero_point);
      } else {
        zero_point_ = zero_point;
      }
      
      at::Tensor fake_quant_enabled_;
      if (at::functionalization::impl::isFunctionalTensor(fake_quant_enabled)) {
        at::functionalization::impl::sync(fake_quant_enabled);
        fake_quant_enabled_ = at::functionalization::impl::from_functional_tensor(fake_quant_enabled);
      } else {
        fake_quant_enabled_ = fake_quant_enabled;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || scale.device().type() == c10::DeviceType::XLA || zero_point.device().type() == c10::DeviceType::XLA || fake_quant_enabled.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(scale) || at::functionalization::impl::isFunctionalTensor(zero_point) || at::functionalization::impl::isFunctionalTensor(fake_quant_enabled))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out::call(self_, scale_, zero_point_, fake_quant_enabled_, quant_min, quant_max, out0_, out1_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams::call(self_, scale_, zero_point_, fake_quant_enabled_, quant_min, quant_max);
        }
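        // Write the functionally computed results back into the functional out0/out1 wrappers:
        // replace each value, commit and sync the updates, and propagate any XLA data from
        // the previous inner tensors to the updated ones.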
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
      }
    }

    at::Tensor & _fake_quantize_learnable_per_tensor_affine_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto scale_meta = to_meta(scale);
        auto zero_point_meta = to_meta(zero_point);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_fake_quantize_learnable_per_tensor_affine_out::call(self_meta, scale_meta, zero_point_meta, quant_min, quant_max, grad_factor, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor scale_;
      if (at::functionalization::impl::isFunctionalTensor(scale)) {
        at::functionalization::impl::sync(scale);
        scale_ = at::functionalization::impl::from_functional_tensor(scale);
      } else {
        scale_ = scale;
      }
      
      at::Tensor zero_point_;
      if (at::functionalization::impl::isFunctionalTensor(zero_point)) {
        at::functionalization::impl::sync(zero_point);
        zero_point_ = at::functionalization::impl::from_functional_tensor(zero_point);
      } else {
        zero_point_ = zero_point;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || scale.device().type() == c10::DeviceType::XLA || zero_point.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(scale) || at::functionalization::impl::isFunctionalTensor(zero_point))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_fake_quantize_learnable_per_tensor_affine_out::call(self_, scale_, zero_point_, quant_min, quant_max, grad_factor, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_fake_quantize_learnable_per_tensor_affine::call(self_, scale_, zero_point_, quant_min, quant_max, grad_factor);
        }
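        // Write the functionally computed result back into the functional `out` wrapper:
        // replace its value, commit and sync the update, and propagate any XLA data from
        // the previous inner tensor to the updated one.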
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> fake_quantize_per_channel_affine_cachemask_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto scale_meta = to_meta(scale);
        auto zero_point_meta = to_meta(zero_point);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::fake_quantize_per_channel_affine_cachemask_out::call(self_meta, scale_meta, zero_point_meta, axis, quant_min, quant_max, out0_meta, out1_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor scale_;
      if (at::functionalization::impl::isFunctionalTensor(scale)) {
        at::functionalization::impl::sync(scale);
        scale_ = at::functionalization::impl::from_functional_tensor(scale);
      } else {
        scale_ = scale;
      }
      
      at::Tensor zero_point_;
      if (at::functionalization::impl::isFunctionalTensor(zero_point)) {
        at::functionalization::impl::sync(zero_point);
        zero_point_ = at::functionalization::impl::from_functional_tensor(zero_point);
      } else {
        zero_point_ = zero_point;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || scale.device().type() == c10::DeviceType::XLA || zero_point.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(scale) || at::functionalization::impl::isFunctionalTensor(zero_point))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::fake_quantize_per_channel_affine_cachemask_out::call(self_, scale_, zero_point_, axis, quant_min, quant_max, out0_, out1_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::fake_quantize_per_channel_affine_cachemask::call(self_, scale_, zero_point_, axis, quant_min, quant_max);
        }
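        // Write the functionally computed results back into the functional out0/out1 wrappers:
        // replace each value, commit and sync the updates, and propagate any XLA data from
        // the previous inner tensors to the updated ones.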
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
      }
    }

    at::Tensor & _fake_quantize_learnable_per_channel_affine_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto scale_meta = to_meta(scale);
        auto zero_point_meta = to_meta(zero_point);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_fake_quantize_learnable_per_channel_affine_out::call(self_meta, scale_meta, zero_point_meta, axis, quant_min, quant_max, grad_factor, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor scale_;
      if (at::functionalization::impl::isFunctionalTensor(scale)) {
        at::functionalization::impl::sync(scale);
        scale_ = at::functionalization::impl::from_functional_tensor(scale);
      } else {
        scale_ = scale;
      }
      
      at::Tensor zero_point_;
      if (at::functionalization::impl::isFunctionalTensor(zero_point)) {
        at::functionalization::impl::sync(zero_point);
        zero_point_ = at::functionalization::impl::from_functional_tensor(zero_point);
      } else {
        zero_point_ = zero_point;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || scale.device().type() == c10::DeviceType::XLA || zero_point.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(scale) || at::functionalization::impl::isFunctionalTensor(zero_point))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_fake_quantize_learnable_per_channel_affine_out::call(self_, scale_, zero_point_, axis, quant_min, quant_max, grad_factor, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_fake_quantize_learnable_per_channel_affine::call(self_, scale_, zero_point_, axis, quant_min, quant_max, grad_factor);
        }
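        // Write the functionally computed result back into the functional `out` wrapper:
        // replace its value, commit and sync the update, and propagate any XLA data from
        // the previous inner tensor to the updated one.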
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> _fused_moving_avg_obs_fq_helper_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant, at::Tensor & out0, at::Tensor & out1) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto observer_on_meta = to_meta(observer_on);
        auto fake_quant_on_meta = to_meta(fake_quant_on);
        auto running_min_meta = to_meta(running_min);
        auto running_max_meta = to_meta(running_max);
        auto scale_meta = to_meta(scale);
        auto zero_point_meta = to_meta(zero_point);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_fused_moving_avg_obs_fq_helper_out::call(self_meta, observer_on_meta, fake_quant_on_meta, running_min_meta, running_max_meta, scale_meta, zero_point_meta, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant, out0_meta, out1_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor observer_on_;
      if (at::functionalization::impl::isFunctionalTensor(observer_on)) {
        at::functionalization::impl::sync(observer_on);
        observer_on_ = at::functionalization::impl::from_functional_tensor(observer_on);
      } else {
        observer_on_ = observer_on;
      }
      
      at::Tensor fake_quant_on_;
      if (at::functionalization::impl::isFunctionalTensor(fake_quant_on)) {
        at::functionalization::impl::sync(fake_quant_on);
        fake_quant_on_ = at::functionalization::impl::from_functional_tensor(fake_quant_on);
      } else {
        fake_quant_on_ = fake_quant_on;
      }
      
      at::Tensor running_min_;
      if (at::functionalization::impl::isFunctionalTensor(running_min)) {
        at::functionalization::impl::sync(running_min);
        running_min_ = at::functionalization::impl::from_functional_tensor(running_min);
      } else {
        running_min_ = running_min;
      }
      
      at::Tensor running_max_;
      if (at::functionalization::impl::isFunctionalTensor(running_max)) {
        at::functionalization::impl::sync(running_max);
        running_max_ = at::functionalization::impl::from_functional_tensor(running_max);
      } else {
        running_max_ = running_max;
      }
      
      at::Tensor scale_;
      if (at::functionalization::impl::isFunctionalTensor(scale)) {
        at::functionalization::impl::sync(scale);
        scale_ = at::functionalization::impl::from_functional_tensor(scale);
      } else {
        scale_ = scale;
      }
      
      at::Tensor zero_point_;
      if (at::functionalization::impl::isFunctionalTensor(zero_point)) {
        at::functionalization::impl::sync(zero_point);
        zero_point_ = at::functionalization::impl::from_functional_tensor(zero_point);
      } else {
        zero_point_ = zero_point;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(running_min) && at::functionalization::impl::isFunctionalTensor(running_max) && at::functionalization::impl::isFunctionalTensor(scale) && at::functionalization::impl::isFunctionalTensor(zero_point) && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || observer_on.device().type() == c10::DeviceType::XLA || fake_quant_on.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(observer_on) || at::functionalization::impl::isFunctionalTensor(fake_quant_on))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::_fused_moving_avg_obs_fq_helper_out::call(self_, observer_on_, fake_quant_on_, running_min_, running_max_, scale_, zero_point_, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant, out0_, out1_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_fused_moving_avg_obs_fq_helper_functional::call(self_, observer_on_, fake_quant_on_, running_min_, running_max_, scale_, zero_point_, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant);
        }
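        // Write the results of the functional variant back into every mutated argument
        // (running_min, running_max, scale, zero_point, out0, out1): replace each value,
        // commit and sync the updates, and propagate any XLA data from the previous inner
        // tensors to the updated ones.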
        auto running_min_inner = at::functionalization::impl::from_functional_tensor(running_min);
        at::functionalization::impl::replace_(running_min, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(running_min);
        at::functionalization::impl::sync(running_min);
        auto running_min_inner_updated = at::functionalization::impl::from_functional_tensor(running_min);
        at::functionalization::impl::propagate_xla_data_direct(running_min_inner, running_min_inner_updated);
        auto running_max_inner = at::functionalization::impl::from_functional_tensor(running_max);
        at::functionalization::impl::replace_(running_max, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(running_max);
        at::functionalization::impl::sync(running_max);
        auto running_max_inner_updated = at::functionalization::impl::from_functional_tensor(running_max);
        at::functionalization::impl::propagate_xla_data_direct(running_max_inner, running_max_inner_updated);
        auto scale_inner = at::functionalization::impl::from_functional_tensor(scale);
        at::functionalization::impl::replace_(scale, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(scale);
        at::functionalization::impl::sync(scale);
        auto scale_inner_updated = at::functionalization::impl::from_functional_tensor(scale);
        at::functionalization::impl::propagate_xla_data_direct(scale_inner, scale_inner_updated);
        auto zero_point_inner = at::functionalization::impl::from_functional_tensor(zero_point);
        at::functionalization::impl::replace_(zero_point, std::get<3>(tmp_output));
        at::functionalization::impl::commit_update(zero_point);
        at::functionalization::impl::sync(zero_point);
        auto zero_point_inner_updated = at::functionalization::impl::from_functional_tensor(zero_point);
        at::functionalization::impl::propagate_xla_data_direct(zero_point_inner, zero_point_inner_updated);
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<4>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<5>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
      }
    }

    ::std::tuple<at::Tensor,at::Tensor> _fused_moving_avg_obs_fq_helper(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto observer_on_meta = to_meta(observer_on);
        auto fake_quant_on_meta = to_meta(fake_quant_on);
        auto running_min_meta = to_meta(running_min);
        auto running_max_meta = to_meta(running_max);
        auto scale_meta = to_meta(scale);
        auto zero_point_meta = to_meta(zero_point);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_fused_moving_avg_obs_fq_helper::call(self_meta, observer_on_meta, fake_quant_on_meta, running_min_meta, running_max_meta, scale_meta, zero_point_meta, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor observer_on_;
      if (at::functionalization::impl::isFunctionalTensor(observer_on)) {
        at::functionalization::impl::sync(observer_on);
        observer_on_ = at::functionalization::impl::from_functional_tensor(observer_on);
      } else {
        observer_on_ = observer_on;
      }
      
      at::Tensor fake_quant_on_;
      if (at::functionalization::impl::isFunctionalTensor(fake_quant_on)) {
        at::functionalization::impl::sync(fake_quant_on);
        fake_quant_on_ = at::functionalization::impl::from_functional_tensor(fake_quant_on);
      } else {
        fake_quant_on_ = fake_quant_on;
      }
      
      at::Tensor running_min_;
      if (at::functionalization::impl::isFunctionalTensor(running_min)) {
        at::functionalization::impl::sync(running_min);
        running_min_ = at::functionalization::impl::from_functional_tensor(running_min);
      } else {
        running_min_ = running_min;
      }
      
      at::Tensor running_max_;
      if (at::functionalization::impl::isFunctionalTensor(running_max)) {
        at::functionalization::impl::sync(running_max);
        running_max_ = at::functionalization::impl::from_functional_tensor(running_max);
      } else {
        running_max_ = running_max;
      }
      
      at::Tensor scale_;
      if (at::functionalization::impl::isFunctionalTensor(scale)) {
        at::functionalization::impl::sync(scale);
        scale_ = at::functionalization::impl::from_functional_tensor(scale);
      } else {
        scale_ = scale;
      }
      
      at::Tensor zero_point_;
      if (at::functionalization::impl::isFunctionalTensor(zero_point)) {
        at::functionalization::impl::sync(zero_point);
        zero_point_ = at::functionalization::impl::from_functional_tensor(zero_point);
      } else {
        zero_point_ = zero_point;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(running_min) && at::functionalization::impl::isFunctionalTensor(running_max) && at::functionalization::impl::isFunctionalTensor(scale) && at::functionalization::impl::isFunctionalTensor(zero_point))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || observer_on.device().type() == c10::DeviceType::XLA || fake_quant_on.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(observer_on) || at::functionalization::impl::isFunctionalTensor(fake_quant_on))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::_fused_moving_avg_obs_fq_helper::call(self_, observer_on_, fake_quant_on_, running_min_, running_max_, scale_, zero_point_, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant);
         return ::std::tuple<at::Tensor,at::Tensor>(std::get<0>(tmp_output), std::get<1>(tmp_output));
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_fused_moving_avg_obs_fq_helper_functional::call(self_, observer_on_, fake_quant_on_, running_min_, running_max_, scale_, zero_point_, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant);
        }
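        // Wrap the two fresh outputs as functional tensors, then write the mutated buffers
        // (running_min, running_max, scale, zero_point) back into their functional wrappers:
        // replace each value, commit and sync the updates, and propagate any XLA data from
        // the previous inner tensors to the updated ones.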
        auto output_0 = at::functionalization::impl::to_functional_tensor(std::get<0>(tmp_output));
        auto output_1 = at::functionalization::impl::to_functional_tensor(std::get<1>(tmp_output));
        auto running_min_inner = at::functionalization::impl::from_functional_tensor(running_min);
        at::functionalization::impl::replace_(running_min, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(running_min);
        at::functionalization::impl::sync(running_min);
        auto running_min_inner_updated = at::functionalization::impl::from_functional_tensor(running_min);
        at::functionalization::impl::propagate_xla_data_direct(running_min_inner, running_min_inner_updated);
        auto running_max_inner = at::functionalization::impl::from_functional_tensor(running_max);
        at::functionalization::impl::replace_(running_max, std::get<3>(tmp_output));
        at::functionalization::impl::commit_update(running_max);
        at::functionalization::impl::sync(running_max);
        auto running_max_inner_updated = at::functionalization::impl::from_functional_tensor(running_max);
        at::functionalization::impl::propagate_xla_data_direct(running_max_inner, running_max_inner_updated);
        auto scale_inner = at::functionalization::impl::from_functional_tensor(scale);
        at::functionalization::impl::replace_(scale, std::get<4>(tmp_output));
        at::functionalization::impl::commit_update(scale);
        at::functionalization::impl::sync(scale);
        auto scale_inner_updated = at::functionalization::impl::from_functional_tensor(scale);
        at::functionalization::impl::propagate_xla_data_direct(scale_inner, scale_inner_updated);
        auto zero_point_inner = at::functionalization::impl::from_functional_tensor(zero_point);
        at::functionalization::impl::replace_(zero_point, std::get<5>(tmp_output));
        at::functionalization::impl::commit_update(zero_point);
        at::functionalization::impl::sync(zero_point);
        auto zero_point_inner_updated = at::functionalization::impl::from_functional_tensor(zero_point);
        at::functionalization::impl::propagate_xla_data_direct(zero_point_inner, zero_point_inner_updated);
        return ::std::tuple<at::Tensor,at::Tensor>(output_0, output_1);
      }
    }

    at::Tensor & _to_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_to_copy_out::call(self_meta, non_blocking, memory_format, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_to_copy_out::call(self_, non_blocking, memory_format, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_to_copy::call(self_, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt, non_blocking, memory_format);
        }
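        // Write the functionally computed result back into the functional `out` wrapper:
        // replace its value, commit and sync the update, and propagate any XLA data from
        // the previous inner tensor to the updated one.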
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _lstm_mps_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, at::Tensor & out5) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto hx_meta = to_meta(hx);
        auto params_meta = to_meta(params);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        auto out2_meta = to_meta(out2);
        auto out3_meta = to_meta(out3);
        auto out4_meta = to_meta(out4);
        auto out5_meta = to_meta(out5);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_lstm_mps_out::call(input_meta, hx_meta, params_meta, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0_meta, out1_meta, out2_meta, out3_meta, out4_meta, out5_meta);
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      ::std::vector<at::Tensor> hx_;
      if (at::functionalization::impl::isFunctionalTensor(hx)) {
        at::functionalization::impl::sync(hx);
        hx_ = at::functionalization::impl::from_functional_tensor(hx);
      } else {
        hx_ = hx.vec();
      }
      
      ::std::vector<at::Tensor> params_;
      if (at::functionalization::impl::isFunctionalTensor(params)) {
        at::functionalization::impl::sync(params);
        params_ = at::functionalization::impl::from_functional_tensor(params);
      } else {
        params_ = params.vec();
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      
      at::Tensor out2_;
      if (at::functionalization::impl::isFunctionalTensor(out2)) {
        at::functionalization::impl::sync(out2);
        out2_ = at::functionalization::impl::from_functional_tensor(out2);
      } else {
        out2_ = out2;
      }
      
      at::Tensor out3_;
      if (at::functionalization::impl::isFunctionalTensor(out3)) {
        at::functionalization::impl::sync(out3);
        out3_ = at::functionalization::impl::from_functional_tensor(out3);
      } else {
        out3_ = out3;
      }
      
      at::Tensor out4_;
      if (at::functionalization::impl::isFunctionalTensor(out4)) {
        at::functionalization::impl::sync(out4);
        out4_ = at::functionalization::impl::from_functional_tensor(out4);
      } else {
        out4_ = out4;
      }
      
      at::Tensor out5_;
      if (at::functionalization::impl::isFunctionalTensor(out5)) {
        at::functionalization::impl::sync(out5);
        out5_ = at::functionalization::impl::from_functional_tensor(out5);
      } else {
        out5_ = out5;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2) && at::functionalization::impl::isFunctionalTensor(out3) && at::functionalization::impl::isFunctionalTensor(out4) && at::functionalization::impl::isFunctionalTensor(out5))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(hx) || at::functionalization::impl::isFunctionalTensor(params))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::_lstm_mps_out::call(input_, hx_, params_, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0_, out1_, out2_, out3_, out4_, out5_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3, out4, out5);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_lstm_mps::call(input_, hx_, params_, has_biases, num_layers, dropout, train, bidirectional, batch_first);
        }
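        // Write the functionally computed results back into the functional out0..out5 wrappers:
        // replace each value, commit and sync the updates, and propagate any XLA data from
        // the previous inner tensors to the updated ones.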
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        auto out2_inner = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(out2);
        at::functionalization::impl::sync(out2);
        auto out2_inner_updated = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::propagate_xla_data_direct(out2_inner, out2_inner_updated);
        auto out3_inner = at::functionalization::impl::from_functional_tensor(out3);
        at::functionalization::impl::replace_(out3, std::get<3>(tmp_output));
        at::functionalization::impl::commit_update(out3);
        at::functionalization::impl::sync(out3);
        auto out3_inner_updated = at::functionalization::impl::from_functional_tensor(out3);
        at::functionalization::impl::propagate_xla_data_direct(out3_inner, out3_inner_updated);
        auto out4_inner = at::functionalization::impl::from_functional_tensor(out4);
        at::functionalization::impl::replace_(out4, std::get<4>(tmp_output));
        at::functionalization::impl::commit_update(out4);
        at::functionalization::impl::sync(out4);
        auto out4_inner_updated = at::functionalization::impl::from_functional_tensor(out4);
        at::functionalization::impl::propagate_xla_data_direct(out4_inner, out4_inner_updated);
        auto out5_inner = at::functionalization::impl::from_functional_tensor(out5);
        at::functionalization::impl::replace_(out5, std::get<5>(tmp_output));
        at::functionalization::impl::commit_update(out5);
        at::functionalization::impl::sync(out5);
        auto out5_inner_updated = at::functionalization::impl::from_functional_tensor(out5);
        at::functionalization::impl::propagate_xla_data_direct(out5_inner, out5_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3, out4, out5);
      }
    }

    void lstm_mps_backward_out_out(c10::DispatchKeySet dispatchKeySet, const ::std::optional<at::Tensor> & grad_y, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::TensorList out1, at::TensorList out2) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_y_meta = to_meta(grad_y);
        auto grad_hy_meta = to_meta(grad_hy);
        auto grad_cy_meta = to_meta(grad_cy);
        auto z_state_meta = to_meta(z_state);
        auto cell_state_fwd_meta = to_meta(cell_state_fwd);
        auto input_meta = to_meta(input);
        auto layersOutputs_meta = to_meta(layersOutputs);
        auto hx_meta = to_meta(hx);
        auto params_meta = to_meta(params);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        auto out2_meta = to_meta(out2);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::lstm_mps_backward_out::call(grad_y_meta, grad_hy_meta, grad_cy_meta, z_state_meta, cell_state_fwd_meta, input_meta, layersOutputs_meta, hx_meta, params_meta, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0_meta, out1_meta, out2_meta);
      }
      
      ::std::optional<at::Tensor> grad_y_;
      if (at::functionalization::impl::isFunctionalTensor(grad_y)) {
        at::functionalization::impl::sync(grad_y);
        grad_y_ = at::functionalization::impl::from_functional_tensor(grad_y);
      } else {
        grad_y_ = grad_y;
      }
      
      ::std::optional<at::Tensor> grad_hy_;
      if (at::functionalization::impl::isFunctionalTensor(grad_hy)) {
        at::functionalization::impl::sync(grad_hy);
        grad_hy_ = at::functionalization::impl::from_functional_tensor(grad_hy);
      } else {
        grad_hy_ = grad_hy;
      }
      
      ::std::optional<at::Tensor> grad_cy_;
      if (at::functionalization::impl::isFunctionalTensor(grad_cy)) {
        at::functionalization::impl::sync(grad_cy);
        grad_cy_ = at::functionalization::impl::from_functional_tensor(grad_cy);
      } else {
        grad_cy_ = grad_cy;
      }
      
      at::Tensor z_state_;
      if (at::functionalization::impl::isFunctionalTensor(z_state)) {
        at::functionalization::impl::sync(z_state);
        z_state_ = at::functionalization::impl::from_functional_tensor(z_state);
      } else {
        z_state_ = z_state;
      }
      
      at::Tensor cell_state_fwd_;
      if (at::functionalization::impl::isFunctionalTensor(cell_state_fwd)) {
        at::functionalization::impl::sync(cell_state_fwd);
        cell_state_fwd_ = at::functionalization::impl::from_functional_tensor(cell_state_fwd);
      } else {
        cell_state_fwd_ = cell_state_fwd;
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      at::Tensor layersOutputs_;
      if (at::functionalization::impl::isFunctionalTensor(layersOutputs)) {
        at::functionalization::impl::sync(layersOutputs);
        layersOutputs_ = at::functionalization::impl::from_functional_tensor(layersOutputs);
      } else {
        layersOutputs_ = layersOutputs;
      }
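      // The TensorList arguments (hx, params) and the list outputs (out1, out2) are
      // unwrapped into std::vector<at::Tensor>; non-functional lists are just copied
      // with .vec().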
      
      ::std::vector<at::Tensor> hx_;
      if (at::functionalization::impl::isFunctionalTensor(hx)) {
        at::functionalization::impl::sync(hx);
        hx_ = at::functionalization::impl::from_functional_tensor(hx);
      } else {
        hx_ = hx.vec();
      }
      
      ::std::vector<at::Tensor> params_;
      if (at::functionalization::impl::isFunctionalTensor(params)) {
        at::functionalization::impl::sync(params);
        params_ = at::functionalization::impl::from_functional_tensor(params);
      } else {
        params_ = params.vec();
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      ::std::vector<at::Tensor> out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1.vec();
      }
      
      ::std::vector<at::Tensor> out2_;
      if (at::functionalization::impl::isFunctionalTensor(out2)) {
        at::functionalization::impl::sync(out2);
        out2_ = at::functionalization::impl::from_functional_tensor(out2);
      } else {
        out2_ = out2.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || z_state.device().type() == c10::DeviceType::XLA || cell_state_fwd.device().type() == c10::DeviceType::XLA || input.device().type() == c10::DeviceType::XLA || layersOutputs.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_y) || at::functionalization::impl::isFunctionalTensor(grad_hy) || at::functionalization::impl::isFunctionalTensor(grad_cy) || at::functionalization::impl::isFunctionalTensor(z_state) || at::functionalization::impl::isFunctionalTensor(cell_state_fwd) || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(layersOutputs) || at::functionalization::impl::isFunctionalTensor(hx) || at::functionalization::impl::isFunctionalTensor(params))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::lstm_mps_backward_out::call(grad_y_, grad_hy_, grad_cy_, z_state_, cell_state_fwd_, input_, layersOutputs_, hx_, params_, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0_, out1_, out2_);
         
        }
      } else {
        ::std::tuple<at::Tensor,::std::vector<at::Tensor>,::std::vector<at::Tensor>> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::lstm_mps_backward::call(grad_y_, grad_hy_, grad_cy_, z_state_, cell_state_fwd_, input_, layersOutputs_, hx_, params_, has_biases, num_layers, dropout, train, bidirectional, batch_first);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        auto out2_inner = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(out2);
        at::functionalization::impl::sync(out2);
        auto out2_inner_updated = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::propagate_xla_data_direct(out2_inner, out2_inner_updated);
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_lstm_cell_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & cx, const ::std::optional<at::Tensor> & input_bias, const ::std::optional<at::Tensor> & hidden_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_gates_meta = to_meta(input_gates);
        auto hidden_gates_meta = to_meta(hidden_gates);
        auto cx_meta = to_meta(cx);
        auto input_bias_meta = to_meta(input_bias);
        auto hidden_bias_meta = to_meta(hidden_bias);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        auto out2_meta = to_meta(out2);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_thnn_fused_lstm_cell_out::call(input_gates_meta, hidden_gates_meta, cx_meta, input_bias_meta, hidden_bias_meta, out0_meta, out1_meta, out2_meta);
      }
      
      at::Tensor input_gates_;
      if (at::functionalization::impl::isFunctionalTensor(input_gates)) {
        at::functionalization::impl::sync(input_gates);
        input_gates_ = at::functionalization::impl::from_functional_tensor(input_gates);
      } else {
        input_gates_ = input_gates;
      }
      
      at::Tensor hidden_gates_;
      if (at::functionalization::impl::isFunctionalTensor(hidden_gates)) {
        at::functionalization::impl::sync(hidden_gates);
        hidden_gates_ = at::functionalization::impl::from_functional_tensor(hidden_gates);
      } else {
        hidden_gates_ = hidden_gates;
      }
      
      at::Tensor cx_;
      if (at::functionalization::impl::isFunctionalTensor(cx)) {
        at::functionalization::impl::sync(cx);
        cx_ = at::functionalization::impl::from_functional_tensor(cx);
      } else {
        cx_ = cx;
      }
      
      ::std::optional<at::Tensor> input_bias_;
      if (at::functionalization::impl::isFunctionalTensor(input_bias)) {
        at::functionalization::impl::sync(input_bias);
        input_bias_ = at::functionalization::impl::from_functional_tensor(input_bias);
      } else {
        input_bias_ = input_bias;
      }
      
      ::std::optional<at::Tensor> hidden_bias_;
      if (at::functionalization::impl::isFunctionalTensor(hidden_bias)) {
        at::functionalization::impl::sync(hidden_bias);
        hidden_bias_ = at::functionalization::impl::from_functional_tensor(hidden_bias);
      } else {
        hidden_bias_ = hidden_bias;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      
      at::Tensor out2_;
      if (at::functionalization::impl::isFunctionalTensor(out2)) {
        at::functionalization::impl::sync(out2);
        out2_ = at::functionalization::impl::from_functional_tensor(out2);
      } else {
        out2_ = out2;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input_gates.device().type() == c10::DeviceType::XLA || hidden_gates.device().type() == c10::DeviceType::XLA || cx.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input_gates) || at::functionalization::impl::isFunctionalTensor(hidden_gates) || at::functionalization::impl::isFunctionalTensor(cx) || at::functionalization::impl::isFunctionalTensor(input_bias) || at::functionalization::impl::isFunctionalTensor(hidden_bias))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::_thnn_fused_lstm_cell_out::call(input_gates_, hidden_gates_, cx_, input_bias_, hidden_bias_, out0_, out1_, out2_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_thnn_fused_lstm_cell::call(input_gates_, hidden_gates_, cx_, input_bias_, hidden_bias_);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        auto out2_inner = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(out2);
        at::functionalization::impl::sync(out2);
        auto out2_inner_updated = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::propagate_xla_data_direct(out2_inner, out2_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_lstm_cell_backward_impl_out_out(c10::DispatchKeySet dispatchKeySet, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_hy_meta = to_meta(grad_hy);
        auto grad_cy_meta = to_meta(grad_cy);
        auto cx_meta = to_meta(cx);
        auto cy_meta = to_meta(cy);
        auto workspace_meta = to_meta(workspace);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        auto out2_meta = to_meta(out2);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_thnn_fused_lstm_cell_backward_impl_out::call(grad_hy_meta, grad_cy_meta, cx_meta, cy_meta, workspace_meta, has_bias, out0_meta, out1_meta, out2_meta);
      }
      
      ::std::optional<at::Tensor> grad_hy_;
      if (at::functionalization::impl::isFunctionalTensor(grad_hy)) {
        at::functionalization::impl::sync(grad_hy);
        grad_hy_ = at::functionalization::impl::from_functional_tensor(grad_hy);
      } else {
        grad_hy_ = grad_hy;
      }
      
      ::std::optional<at::Tensor> grad_cy_;
      if (at::functionalization::impl::isFunctionalTensor(grad_cy)) {
        at::functionalization::impl::sync(grad_cy);
        grad_cy_ = at::functionalization::impl::from_functional_tensor(grad_cy);
      } else {
        grad_cy_ = grad_cy;
      }
      
      at::Tensor cx_;
      if (at::functionalization::impl::isFunctionalTensor(cx)) {
        at::functionalization::impl::sync(cx);
        cx_ = at::functionalization::impl::from_functional_tensor(cx);
      } else {
        cx_ = cx;
      }
      
      at::Tensor cy_;
      if (at::functionalization::impl::isFunctionalTensor(cy)) {
        at::functionalization::impl::sync(cy);
        cy_ = at::functionalization::impl::from_functional_tensor(cy);
      } else {
        cy_ = cy;
      }
      
      at::Tensor workspace_;
      if (at::functionalization::impl::isFunctionalTensor(workspace)) {
        at::functionalization::impl::sync(workspace);
        workspace_ = at::functionalization::impl::from_functional_tensor(workspace);
      } else {
        workspace_ = workspace;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      
      at::Tensor out2_;
      if (at::functionalization::impl::isFunctionalTensor(out2)) {
        at::functionalization::impl::sync(out2);
        out2_ = at::functionalization::impl::from_functional_tensor(out2);
      } else {
        out2_ = out2;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || cx.device().type() == c10::DeviceType::XLA || cy.device().type() == c10::DeviceType::XLA || workspace.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_hy) || at::functionalization::impl::isFunctionalTensor(grad_cy) || at::functionalization::impl::isFunctionalTensor(cx) || at::functionalization::impl::isFunctionalTensor(cy) || at::functionalization::impl::isFunctionalTensor(workspace))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::_thnn_fused_lstm_cell_backward_impl_out::call(grad_hy_, grad_cy_, cx_, cy_, workspace_, has_bias, out0_, out1_, out2_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_thnn_fused_lstm_cell_backward_impl::call(grad_hy_, grad_cy_, cx_, cy_, workspace_, has_bias);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        auto out2_inner = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(out2);
        at::functionalization::impl::sync(out2);
        auto out2_inner_updated = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::propagate_xla_data_direct(out2_inner, out2_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> _thnn_fused_gru_cell_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const ::std::optional<at::Tensor> & input_bias, const ::std::optional<at::Tensor> & hidden_bias, at::Tensor & out0, at::Tensor & out1) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_gates_meta = to_meta(input_gates);
        auto hidden_gates_meta = to_meta(hidden_gates);
        auto hx_meta = to_meta(hx);
        auto input_bias_meta = to_meta(input_bias);
        auto hidden_bias_meta = to_meta(hidden_bias);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_thnn_fused_gru_cell_out::call(input_gates_meta, hidden_gates_meta, hx_meta, input_bias_meta, hidden_bias_meta, out0_meta, out1_meta);
      }
      
      at::Tensor input_gates_;
      if (at::functionalization::impl::isFunctionalTensor(input_gates)) {
        at::functionalization::impl::sync(input_gates);
        input_gates_ = at::functionalization::impl::from_functional_tensor(input_gates);
      } else {
        input_gates_ = input_gates;
      }
      
      at::Tensor hidden_gates_;
      if (at::functionalization::impl::isFunctionalTensor(hidden_gates)) {
        at::functionalization::impl::sync(hidden_gates);
        hidden_gates_ = at::functionalization::impl::from_functional_tensor(hidden_gates);
      } else {
        hidden_gates_ = hidden_gates;
      }
      
      at::Tensor hx_;
      if (at::functionalization::impl::isFunctionalTensor(hx)) {
        at::functionalization::impl::sync(hx);
        hx_ = at::functionalization::impl::from_functional_tensor(hx);
      } else {
        hx_ = hx;
      }
      
      ::std::optional<at::Tensor> input_bias_;
      if (at::functionalization::impl::isFunctionalTensor(input_bias)) {
        at::functionalization::impl::sync(input_bias);
        input_bias_ = at::functionalization::impl::from_functional_tensor(input_bias);
      } else {
        input_bias_ = input_bias;
      }
      
      ::std::optional<at::Tensor> hidden_bias_;
      if (at::functionalization::impl::isFunctionalTensor(hidden_bias)) {
        at::functionalization::impl::sync(hidden_bias);
        hidden_bias_ = at::functionalization::impl::from_functional_tensor(hidden_bias);
      } else {
        hidden_bias_ = hidden_bias;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input_gates.device().type() == c10::DeviceType::XLA || hidden_gates.device().type() == c10::DeviceType::XLA || hx.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input_gates) || at::functionalization::impl::isFunctionalTensor(hidden_gates) || at::functionalization::impl::isFunctionalTensor(hx) || at::functionalization::impl::isFunctionalTensor(input_bias) || at::functionalization::impl::isFunctionalTensor(hidden_bias))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::_thnn_fused_gru_cell_out::call(input_gates_, hidden_gates_, hx_, input_bias_, hidden_bias_, out0_, out1_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_thnn_fused_gru_cell::call(input_gates_, hidden_gates_, hx_, input_bias_, hidden_bias_);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_gru_cell_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_hy, const at::Tensor & workspace, bool has_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_hy_meta = to_meta(grad_hy);
        auto workspace_meta = to_meta(workspace);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        auto out2_meta = to_meta(out2);
        auto out3_meta = to_meta(out3);
        auto out4_meta = to_meta(out4);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_thnn_fused_gru_cell_backward_out::call(grad_hy_meta, workspace_meta, has_bias, out0_meta, out1_meta, out2_meta, out3_meta, out4_meta);
      }
      
      at::Tensor grad_hy_;
      if (at::functionalization::impl::isFunctionalTensor(grad_hy)) {
        at::functionalization::impl::sync(grad_hy);
        grad_hy_ = at::functionalization::impl::from_functional_tensor(grad_hy);
      } else {
        grad_hy_ = grad_hy;
      }
      
      at::Tensor workspace_;
      if (at::functionalization::impl::isFunctionalTensor(workspace)) {
        at::functionalization::impl::sync(workspace);
        workspace_ = at::functionalization::impl::from_functional_tensor(workspace);
      } else {
        workspace_ = workspace;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      
      at::Tensor out2_;
      if (at::functionalization::impl::isFunctionalTensor(out2)) {
        at::functionalization::impl::sync(out2);
        out2_ = at::functionalization::impl::from_functional_tensor(out2);
      } else {
        out2_ = out2;
      }
      
      at::Tensor out3_;
      if (at::functionalization::impl::isFunctionalTensor(out3)) {
        at::functionalization::impl::sync(out3);
        out3_ = at::functionalization::impl::from_functional_tensor(out3);
      } else {
        out3_ = out3;
      }
      
      at::Tensor out4_;
      if (at::functionalization::impl::isFunctionalTensor(out4)) {
        at::functionalization::impl::sync(out4);
        out4_ = at::functionalization::impl::from_functional_tensor(out4);
      } else {
        out4_ = out4;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2) && at::functionalization::impl::isFunctionalTensor(out3) && at::functionalization::impl::isFunctionalTensor(out4))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_hy.device().type() == c10::DeviceType::XLA || workspace.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_hy) || at::functionalization::impl::isFunctionalTensor(workspace))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::_thnn_fused_gru_cell_backward_out::call(grad_hy_, workspace_, has_bias, out0_, out1_, out2_, out3_, out4_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3, out4);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_thnn_fused_gru_cell_backward::call(grad_hy_, workspace_, has_bias);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        auto out2_inner = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(out2);
        at::functionalization::impl::sync(out2);
        auto out2_inner_updated = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::propagate_xla_data_direct(out2_inner, out2_inner_updated);
        auto out3_inner = at::functionalization::impl::from_functional_tensor(out3);
        at::functionalization::impl::replace_(out3, std::get<3>(tmp_output));
        at::functionalization::impl::commit_update(out3);
        at::functionalization::impl::sync(out3);
        auto out3_inner_updated = at::functionalization::impl::from_functional_tensor(out3);
        at::functionalization::impl::propagate_xla_data_direct(out3_inner, out3_inner_updated);
        auto out4_inner = at::functionalization::impl::from_functional_tensor(out4);
        at::functionalization::impl::replace_(out4, std::get<4>(tmp_output));
        at::functionalization::impl::commit_update(out4);
        at::functionalization::impl::sync(out4);
        auto out4_inner_updated = at::functionalization::impl::from_functional_tensor(out4);
        at::functionalization::impl::propagate_xla_data_direct(out4_inner, out4_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3, out4);
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> _pack_padded_sequence_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & lengths, bool batch_first, at::Tensor & out0, at::Tensor & out1) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto lengths_meta = to_meta(lengths);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_pack_padded_sequence_out::call(input_meta, lengths_meta, batch_first, out0_meta, out1_meta);
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      at::Tensor lengths_;
      if (at::functionalization::impl::isFunctionalTensor(lengths)) {
        at::functionalization::impl::sync(lengths);
        lengths_ = at::functionalization::impl::from_functional_tensor(lengths);
      } else {
        lengths_ = lengths;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA || lengths.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(lengths))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::_pack_padded_sequence_out::call(input_, lengths_, batch_first, out0_, out1_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_pack_padded_sequence::call(input_, lengths_, batch_first);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
      }
    }

    at::Tensor & set_out_source_Storage_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Storage source, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::set_source_Storage_out::call(self_meta, source, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::set_source_Storage_out::call(self_, source, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::set_source_Storage::call(self_, source);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & set__source_Storage(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Storage source) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::set__source_Storage::call(self_meta, source);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
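        // Apart from self, the only argument here is an at::Storage, so there are no
        // tensor inputs that could be functional; the condition below is always false
        // and we always take the redispatch path.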
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::set__source_Storage::call(self_, source);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::set_source_Storage::call(self_, source);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & set_out_source_Storage_storage_offset_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::set_source_Storage_storage_offset_out::call(self_meta, source, storage_offset, size, stride, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::set_source_Storage_storage_offset_out::call(self_, source, storage_offset, size, stride, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::set_source_Storage_storage_offset::call(self_, source, storage_offset, size, stride);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & set__source_Storage_storage_offset(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::set__source_Storage_storage_offset::call(self_meta, source, storage_offset, size, stride);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::set__source_Storage_storage_offset::call(self_, source, storage_offset, size, stride);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::set_source_Storage_storage_offset::call(self_, source, storage_offset, size, stride);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & set_out_source_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & source, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto source_meta = to_meta(source);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::set_source_Tensor_out::call(self_meta, source_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor source_;
      if (at::functionalization::impl::isFunctionalTensor(source)) {
        at::functionalization::impl::sync(source);
        source_ = at::functionalization::impl::from_functional_tensor(source);
      } else {
        source_ = source;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || source.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(source))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::set_source_Tensor_out::call(self_, source_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::set_source_Tensor::call(self_, source_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & set__source_Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & source) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto source_meta = to_meta(source);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::set__source_Tensor::call(self_meta, source_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor source_;
      if (at::functionalization::impl::isFunctionalTensor(source)) {
        at::functionalization::impl::sync(source);
        source_ = at::functionalization::impl::from_functional_tensor(source);
      } else {
        source_ = source;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || source.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(source))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::set__source_Tensor::call(self_, source_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::set_source_Tensor::call(self_, source_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
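
    // Illustrative note (editor-added, not generated): when the meta-reference
    // guard above is enabled, the original in-place op is first run on meta
    // tensors so that shape errors specific to the in-place form surface before
    // the op is rewritten into its functional variant. For example (hypothetical
    // snippet, not tied to set_):
    //
    //   at::Tensor a = at::empty({2, 1}, at::kMeta);
    //   at::Tensor b = at::empty({2, 3}, at::kMeta);
    //   a.add_(b);   // fails: the broadcast result {2, 3} cannot be written back
    //                // into a, whereas the functional at::add(a, b) would succeed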

    at::Tensor & set_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::set_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::set_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::set::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & set_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::set_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::set_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::set::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
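
    // Illustrative note (editor-added, not generated): AutoDispatchSkipFunctionalize
    // is the RAII guard that keeps the redispatches in these kernels from re-entering
    // the Functionalize key; behaviourally it is assumed to act like the following
    // (sketch only, not a quote of its definition):
    //
    //   c10::impl::ExcludeDispatchKeyGuard guard(c10::DispatchKey::Functionalize);
    //   at::_ops::set_::call(self_);   // now reaches the backend kernel directly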

    at::Tensor & lift_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::lift_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::lift_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::lift::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
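
    // Illustrative sketch (editor-added, not generated): lift/lift_out are the entry
    // points that pull plain tensors into the functionalization layer; a wrapped
    // tensor is what the isFunctionalTensor checks in these kernels detect. Roughly:
    //
    //   at::Tensor t = at::zeros({2});
    //   at::Tensor w = at::functionalization::impl::to_functional_tensor(t);
    //   TORCH_INTERNAL_ASSERT(at::functionalization::impl::isFunctionalTensor(w));
    //   TORCH_INTERNAL_ASSERT(!at::functionalization::impl::isFunctionalTensor(t));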

    at::Tensor & lift_fresh_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::lift_fresh_copy_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::lift_fresh_copy_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::lift_fresh_copy::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & masked_fill_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto mask_meta = to_meta(mask);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::masked_fill_Scalar_out::call(self_meta, mask_meta, value, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor mask_;
      if (at::functionalization::impl::isFunctionalTensor(mask)) {
        at::functionalization::impl::sync(mask);
        mask_ = at::functionalization::impl::from_functional_tensor(mask);
      } else {
        mask_ = mask;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || mask.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(mask))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::masked_fill_Scalar_out::call(self_, mask_, value, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::masked_fill_Scalar::call(self_, mask_, value);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & masked_fill__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto mask_meta = to_meta(mask);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::masked_fill__Scalar::call(self_meta, mask_meta, value);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor mask_;
      if (at::functionalization::impl::isFunctionalTensor(mask)) {
        at::functionalization::impl::sync(mask);
        mask_ = at::functionalization::impl::from_functional_tensor(mask);
      } else {
        mask_ = mask;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || mask.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(mask))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::masked_fill__Scalar::call(self_, mask_, value);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::masked_fill_Scalar::call(self_, mask_, value);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
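
    // Illustrative sketch (editor-added, not generated): a user-level in-place fill
    // such as the snippet below is what masked_fill__Scalar above serves under
    // functionalization; the mutation is rewritten into the functional
    // at::masked_fill plus a commit back into the wrapper:
    //
    //   at::Tensor x    = at::zeros({4});
    //   at::Tensor mask = at::ones({4}, at::kBool);
    //   x.masked_fill_(mask, 1.0);   // becomes tmp = masked_fill(x_, mask_, 1.0);
    //                                // then replace_(x, tmp); commit_update(x); sync(x);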

    at::Tensor & masked_fill_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto mask_meta = to_meta(mask);
        auto value_meta = to_meta(value);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::masked_fill_Tensor_out::call(self_meta, mask_meta, value_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor mask_;
      if (at::functionalization::impl::isFunctionalTensor(mask)) {
        at::functionalization::impl::sync(mask);
        mask_ = at::functionalization::impl::from_functional_tensor(mask);
      } else {
        mask_ = mask;
      }
      
      at::Tensor value_;
      if (at::functionalization::impl::isFunctionalTensor(value)) {
        at::functionalization::impl::sync(value);
        value_ = at::functionalization::impl::from_functional_tensor(value);
      } else {
        value_ = value;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || mask.device().type() == c10::DeviceType::XLA || value.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(mask) || at::functionalization::impl::isFunctionalTensor(value))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::masked_fill_Tensor_out::call(self_, mask_, value_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::masked_fill_Tensor::call(self_, mask_, value_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & masked_fill__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto mask_meta = to_meta(mask);
        auto value_meta = to_meta(value);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::masked_fill__Tensor::call(self_meta, mask_meta, value_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor mask_;
      if (at::functionalization::impl::isFunctionalTensor(mask)) {
        at::functionalization::impl::sync(mask);
        mask_ = at::functionalization::impl::from_functional_tensor(mask);
      } else {
        mask_ = mask;
      }
      
      at::Tensor value_;
      if (at::functionalization::impl::isFunctionalTensor(value)) {
        at::functionalization::impl::sync(value);
        value_ = at::functionalization::impl::from_functional_tensor(value);
      } else {
        value_ = value;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || mask.device().type() == c10::DeviceType::XLA || value.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(mask) || at::functionalization::impl::isFunctionalTensor(value))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::masked_fill__Tensor::call(self_, mask_, value_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::masked_fill_Tensor::call(self_, mask_, value_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & masked_scatter_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto mask_meta = to_meta(mask);
        auto source_meta = to_meta(source);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::masked_scatter_out::call(self_meta, mask_meta, source_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor mask_;
      if (at::functionalization::impl::isFunctionalTensor(mask)) {
        at::functionalization::impl::sync(mask);
        mask_ = at::functionalization::impl::from_functional_tensor(mask);
      } else {
        mask_ = mask;
      }
      
      at::Tensor source_;
      if (at::functionalization::impl::isFunctionalTensor(source)) {
        at::functionalization::impl::sync(source);
        source_ = at::functionalization::impl::from_functional_tensor(source);
      } else {
        source_ = source;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || mask.device().type() == c10::DeviceType::XLA || source.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(mask) || at::functionalization::impl::isFunctionalTensor(source))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::masked_scatter_out::call(self_, mask_, source_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::masked_scatter::call(self_, mask_, source_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
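
    // Illustrative sketch (editor-added, not generated): the out= wrapper above mirrors
    // the in-place one; an out= call like the hypothetical snippet below (assuming the
    // generated at::masked_scatter_out overload) is served by computing the functional
    // at::masked_scatter and committing the result into `out`:
    //
    //   at::Tensor self   = at::zeros({4});
    //   at::Tensor mask   = at::ones({4}, at::kBool);
    //   at::Tensor source = at::arange(4, at::kFloat);
    //   at::Tensor out    = at::empty({4});
    //   at::masked_scatter_out(out, self, mask, source);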

    at::Tensor & masked_scatter_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto mask_meta = to_meta(mask);
        auto source_meta = to_meta(source);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::masked_scatter_::call(self_meta, mask_meta, source_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor mask_;
      if (at::functionalization::impl::isFunctionalTensor(mask)) {
        at::functionalization::impl::sync(mask);
        mask_ = at::functionalization::impl::from_functional_tensor(mask);
      } else {
        mask_ = mask;
      }
      
      at::Tensor source_;
      if (at::functionalization::impl::isFunctionalTensor(source)) {
        at::functionalization::impl::sync(source);
        source_ = at::functionalization::impl::from_functional_tensor(source);
      } else {
        source_ = source;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || mask.device().type() == c10::DeviceType::XLA || source.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(mask) || at::functionalization::impl::isFunctionalTensor(source))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::masked_scatter_::call(self_, mask_, source_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::masked_scatter::call(self_, mask_, source_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & _masked_softmax_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, ::std::optional<int64_t> dim, ::std::optional<int64_t> mask_type, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto mask_meta = to_meta(mask);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_masked_softmax_out::call(self_meta, mask_meta, dim, mask_type, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor mask_;
      if (at::functionalization::impl::isFunctionalTensor(mask)) {
        at::functionalization::impl::sync(mask);
        mask_ = at::functionalization::impl::from_functional_tensor(mask);
      } else {
        mask_ = mask;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || mask.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(mask))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_masked_softmax_out::call(self_, mask_, dim, mask_type, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_masked_softmax::call(self_, mask_, dim, mask_type);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
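
    // Illustrative note (editor-added, not generated): only Tensor arguments are
    // sync'd and unwrapped by these kernels; non-tensor arguments such as the
    // optional `dim` and `mask_type` above are forwarded unchanged to both the
    // meta-reference call and the redispatch, e.g.:
    //
    //   ::std::optional<int64_t> dim = 1;        // passes straight through
    //   ::std::optional<int64_t> mask_type;      // std::nullopt also passes through
    //   at::_ops::_masked_softmax::call(self_, mask_, dim, mask_type);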

    at::Tensor & _masked_softmax_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, ::std::optional<int64_t> dim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto output_meta = to_meta(output);
        auto mask_meta = to_meta(mask);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_masked_softmax_backward_out::call(grad_output_meta, output_meta, mask_meta, dim, out_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor output_;
      if (at::functionalization::impl::isFunctionalTensor(output)) {
        at::functionalization::impl::sync(output);
        output_ = at::functionalization::impl::from_functional_tensor(output);
      } else {
        output_ = output;
      }
      
      at::Tensor mask_;
      if (at::functionalization::impl::isFunctionalTensor(mask)) {
        at::functionalization::impl::sync(mask);
        mask_ = at::functionalization::impl::from_functional_tensor(mask);
      } else {
        mask_ = mask;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || output.device().type() == c10::DeviceType::XLA || mask.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(output) || at::functionalization::impl::isFunctionalTensor(mask))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_masked_softmax_backward_out::call(grad_output_, output_, mask_, dim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_masked_softmax_backward::call(grad_output_, output_, mask_, dim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
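
    // Illustrative note (editor-added, not generated): after the functional result is
    // committed, propagate_xla_data_direct copies XLA-specific data from the previous
    // inner tensor to the newly committed one (expected to be a no-op for non-XLA
    // tensors). Every functional branch therefore follows the same sequence:
    //
    //   auto before = at::functionalization::impl::from_functional_tensor(out);
    //   at::functionalization::impl::replace_(out, tmp_output);
    //   at::functionalization::impl::commit_update(out);
    //   at::functionalization::impl::sync(out);
    //   auto after  = at::functionalization::impl::from_functional_tensor(out);
    //   at::functionalization::impl::propagate_xla_data_direct(before, after);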

    at::Tensor & put_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto index_meta = to_meta(index);
        auto source_meta = to_meta(source);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::put_out::call(self_meta, index_meta, source_meta, accumulate, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor index_;
      if (at::functionalization::impl::isFunctionalTensor(index)) {
        at::functionalization::impl::sync(index);
        index_ = at::functionalization::impl::from_functional_tensor(index);
      } else {
        index_ = index;
      }
      
      at::Tensor source_;
      if (at::functionalization::impl::isFunctionalTensor(source)) {
        at::functionalization::impl::sync(source);
        source_ = at::functionalization::impl::from_functional_tensor(source);
      } else {
        source_ = source;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || index.device().type() == c10::DeviceType::XLA || source.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(index) || at::functionalization::impl::isFunctionalTensor(source))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::put_out::call(self_, index_, source_, accumulate, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::put::call(self_, index_, source_, accumulate);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & put_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto index_meta = to_meta(index);
        auto source_meta = to_meta(source);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::put_::call(self_meta, index_meta, source_meta, accumulate);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor index_;
      if (at::functionalization::impl::isFunctionalTensor(index)) {
        at::functionalization::impl::sync(index);
        index_ = at::functionalization::impl::from_functional_tensor(index);
      } else {
        index_ = index;
      }
      
      at::Tensor source_;
      if (at::functionalization::impl::isFunctionalTensor(source)) {
        at::functionalization::impl::sync(source);
        source_ = at::functionalization::impl::from_functional_tensor(source);
      } else {
        source_ = source;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || index.device().type() == c10::DeviceType::XLA || source.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(index) || at::functionalization::impl::isFunctionalTensor(source))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::put_::call(self_, index_, source_, accumulate);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::put::call(self_, index_, source_, accumulate);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
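
    // Illustrative sketch (editor-added, not generated): a user-level put_ such as the
    // snippet below is what put_ above serves; under functionalization it is rewritten
    // into the functional at::put plus a commit back into the wrapper:
    //
    //   at::Tensor self   = at::zeros({2, 3});
    //   at::Tensor index  = at::arange(2, at::kLong);   // linear indices 0 and 1
    //   at::Tensor source = at::ones({2});
    //   self.put_(index, source, /*accumulate=*/false);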

    at::Tensor & index_add_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto index_meta = to_meta(index);
        auto source_meta = to_meta(source);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::index_add_out::call(self_meta, dim, index_meta, source_meta, alpha, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor index_;
      if (at::functionalization::impl::isFunctionalTensor(index)) {
        at::functionalization::impl::sync(index);
        index_ = at::functionalization::impl::from_functional_tensor(index);
      } else {
        index_ = index;
      }
      
      at::Tensor source_;
      if (at::functionalization::impl::isFunctionalTensor(source)) {
        at::functionalization::impl::sync(source);
        source_ = at::functionalization::impl::from_functional_tensor(source);
      } else {
        source_ = source;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || index.device().type() == c10::DeviceType::XLA || source.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(index) || at::functionalization::impl::isFunctionalTensor(source))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::index_add_out::call(self_, dim, index_, source_, alpha, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::index_add::call(self_, dim, index_, source_, alpha);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & index_add_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto index_meta = to_meta(index);
        auto source_meta = to_meta(source);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::index_add_::call(self_meta, dim, index_meta, source_meta, alpha);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor index_;
      if (at::functionalization::impl::isFunctionalTensor(index)) {
        at::functionalization::impl::sync(index);
        index_ = at::functionalization::impl::from_functional_tensor(index);
      } else {
        index_ = index;
      }
      
      at::Tensor source_;
      if (at::functionalization::impl::isFunctionalTensor(source)) {
        at::functionalization::impl::sync(source);
        source_ = at::functionalization::impl::from_functional_tensor(source);
      } else {
        source_ = source;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || index.device().type() == c10::DeviceType::XLA || source.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(index) || at::functionalization::impl::isFunctionalTensor(source))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::index_add_::call(self_, dim, index_, source_, alpha);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::index_add::call(self_, dim, index_, source_, alpha);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

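    // For the in-place overloads like index_add_ above, the net effect under
    // functionalization is (rough sketch only, eliding the dispatcher plumbing;
    // `t_inner` names the unwrapped inner tensor for illustration):
    //
    //   at::Tensor t = ...;                       // a FunctionalTensorWrapper
    //   t.index_add_(dim, index, source, alpha);  // observed mutation
    //
    // becomes
    //
    //   at::Tensor updated = at::index_add(t_inner, dim, index, source, alpha);
    //   // the wrapper is then re-pointed at `updated` via replace_()/commit_update()/sync()
    //
    // so everything downstream of the Functionalize key only ever sees out-of-place ops.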
    at::Tensor & index_reduce_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto index_meta = to_meta(index);
        auto source_meta = to_meta(source);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::index_reduce_out::call(self_meta, dim, index_meta, source_meta, reduce, include_self, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor index_;
      if (at::functionalization::impl::isFunctionalTensor(index)) {
        at::functionalization::impl::sync(index);
        index_ = at::functionalization::impl::from_functional_tensor(index);
      } else {
        index_ = index;
      }
      
      at::Tensor source_;
      if (at::functionalization::impl::isFunctionalTensor(source)) {
        at::functionalization::impl::sync(source);
        source_ = at::functionalization::impl::from_functional_tensor(source);
      } else {
        source_ = source;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || index.device().type() == c10::DeviceType::XLA || source.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(index) || at::functionalization::impl::isFunctionalTensor(source))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::index_reduce_out::call(self_, dim, index_, source_, reduce, include_self, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::index_reduce::call(self_, dim, index_, source_, reduce, include_self);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & index_reduce_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto index_meta = to_meta(index);
        auto source_meta = to_meta(source);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::index_reduce_::call(self_meta, dim, index_meta, source_meta, reduce, include_self);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor index_;
      if (at::functionalization::impl::isFunctionalTensor(index)) {
        at::functionalization::impl::sync(index);
        index_ = at::functionalization::impl::from_functional_tensor(index);
      } else {
        index_ = index;
      }
      
      at::Tensor source_;
      if (at::functionalization::impl::isFunctionalTensor(source)) {
        at::functionalization::impl::sync(source);
        source_ = at::functionalization::impl::from_functional_tensor(source);
      } else {
        source_ = source;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || index.device().type() == c10::DeviceType::XLA || source.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(index) || at::functionalization::impl::isFunctionalTensor(source))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::index_reduce_::call(self_, dim, index_, source_, reduce, include_self);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::index_reduce::call(self_, dim, index_, source_, reduce, include_self);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & index_fill_out_int_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto index_meta = to_meta(index);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::index_fill_int_Scalar_out::call(self_meta, dim, index_meta, value, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor index_;
      if (at::functionalization::impl::isFunctionalTensor(index)) {
        at::functionalization::impl::sync(index);
        index_ = at::functionalization::impl::from_functional_tensor(index);
      } else {
        index_ = index;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || index.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(index))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::index_fill_int_Scalar_out::call(self_, dim, index_, value, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::index_fill_int_Scalar::call(self_, dim, index_, value);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & index_fill__int_Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto index_meta = to_meta(index);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::index_fill__int_Scalar::call(self_meta, dim, index_meta, value);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor index_;
      if (at::functionalization::impl::isFunctionalTensor(index)) {
        at::functionalization::impl::sync(index);
        index_ = at::functionalization::impl::from_functional_tensor(index);
      } else {
        index_ = index;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || index.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(index))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::index_fill__int_Scalar::call(self_, dim, index_, value);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::index_fill_int_Scalar::call(self_, dim, index_, value);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & index_fill_out_int_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto index_meta = to_meta(index);
        auto value_meta = to_meta(value);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::index_fill_int_Tensor_out::call(self_meta, dim, index_meta, value_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor index_;
      if (at::functionalization::impl::isFunctionalTensor(index)) {
        at::functionalization::impl::sync(index);
        index_ = at::functionalization::impl::from_functional_tensor(index);
      } else {
        index_ = index;
      }
      
      at::Tensor value_;
      if (at::functionalization::impl::isFunctionalTensor(value)) {
        at::functionalization::impl::sync(value);
        value_ = at::functionalization::impl::from_functional_tensor(value);
      } else {
        value_ = value;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || index.device().type() == c10::DeviceType::XLA || value.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(index) || at::functionalization::impl::isFunctionalTensor(value))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::index_fill_int_Tensor_out::call(self_, dim, index_, value_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::index_fill_int_Tensor::call(self_, dim, index_, value_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & index_fill__int_Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto index_meta = to_meta(index);
        auto value_meta = to_meta(value);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::index_fill__int_Tensor::call(self_meta, dim, index_meta, value_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor index_;
      if (at::functionalization::impl::isFunctionalTensor(index)) {
        at::functionalization::impl::sync(index);
        index_ = at::functionalization::impl::from_functional_tensor(index);
      } else {
        index_ = index;
      }
      
      at::Tensor value_;
      if (at::functionalization::impl::isFunctionalTensor(value)) {
        at::functionalization::impl::sync(value);
        value_ = at::functionalization::impl::from_functional_tensor(value);
      } else {
        value_ = value;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || index.device().type() == c10::DeviceType::XLA || value.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(index) || at::functionalization::impl::isFunctionalTensor(value))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::index_fill__int_Tensor::call(self_, dim, index_, value_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::index_fill_int_Tensor::call(self_, dim, index_, value_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & scatter_out_src_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto index_meta = to_meta(index);
        auto src_meta = to_meta(src);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::scatter_src_out::call(self_meta, dim, index_meta, src_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor index_;
      if (at::functionalization::impl::isFunctionalTensor(index)) {
        at::functionalization::impl::sync(index);
        index_ = at::functionalization::impl::from_functional_tensor(index);
      } else {
        index_ = index;
      }
      
      at::Tensor src_;
      if (at::functionalization::impl::isFunctionalTensor(src)) {
        at::functionalization::impl::sync(src);
        src_ = at::functionalization::impl::from_functional_tensor(src);
      } else {
        src_ = src;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || index.device().type() == c10::DeviceType::XLA || src.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(index) || at::functionalization::impl::isFunctionalTensor(src))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::scatter_src_out::call(self_, dim, index_, src_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::scatter_src::call(self_, dim, index_, src_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & scatter__src(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto index_meta = to_meta(index);
        auto src_meta = to_meta(src);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::scatter__src::call(self_meta, dim, index_meta, src_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor index_;
      if (at::functionalization::impl::isFunctionalTensor(index)) {
        at::functionalization::impl::sync(index);
        index_ = at::functionalization::impl::from_functional_tensor(index);
      } else {
        index_ = index;
      }
      
      at::Tensor src_;
      if (at::functionalization::impl::isFunctionalTensor(src)) {
        at::functionalization::impl::sync(src);
        src_ = at::functionalization::impl::from_functional_tensor(src);
      } else {
        src_ = src;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || index.device().type() == c10::DeviceType::XLA || src.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(index) || at::functionalization::impl::isFunctionalTensor(src))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::scatter__src::call(self_, dim, index_, src_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::scatter_src::call(self_, dim, index_, src_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & scatter_out_value_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto index_meta = to_meta(index);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::scatter_value_out::call(self_meta, dim, index_meta, value, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor index_;
      if (at::functionalization::impl::isFunctionalTensor(index)) {
        at::functionalization::impl::sync(index);
        index_ = at::functionalization::impl::from_functional_tensor(index);
      } else {
        index_ = index;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || index.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(index))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::scatter_value_out::call(self_, dim, index_, value, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::scatter_value::call(self_, dim, index_, value);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & scatter__value(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto index_meta = to_meta(index);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::scatter__value::call(self_meta, dim, index_meta, value);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor index_;
      if (at::functionalization::impl::isFunctionalTensor(index)) {
        at::functionalization::impl::sync(index);
        index_ = at::functionalization::impl::from_functional_tensor(index);
      } else {
        index_ = index;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || index.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(index))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::scatter__value::call(self_, dim, index_, value);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::scatter_value::call(self_, dim, index_, value);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & scatter_out_reduce_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto index_meta = to_meta(index);
        auto src_meta = to_meta(src);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::scatter_reduce_out::call(self_meta, dim, index_meta, src_meta, reduce, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor index_;
      if (at::functionalization::impl::isFunctionalTensor(index)) {
        at::functionalization::impl::sync(index);
        index_ = at::functionalization::impl::from_functional_tensor(index);
      } else {
        index_ = index;
      }
      
      at::Tensor src_;
      if (at::functionalization::impl::isFunctionalTensor(src)) {
        at::functionalization::impl::sync(src);
        src_ = at::functionalization::impl::from_functional_tensor(src);
      } else {
        src_ = src;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || index.device().type() == c10::DeviceType::XLA || src.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(index) || at::functionalization::impl::isFunctionalTensor(src))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::scatter_reduce_out::call(self_, dim, index_, src_, reduce, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::scatter_reduce::call(self_, dim, index_, src_, reduce);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & scatter__reduce(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto index_meta = to_meta(index);
        auto src_meta = to_meta(src);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::scatter__reduce::call(self_meta, dim, index_meta, src_meta, reduce);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor index_;
      if (at::functionalization::impl::isFunctionalTensor(index)) {
        at::functionalization::impl::sync(index);
        index_ = at::functionalization::impl::from_functional_tensor(index);
      } else {
        index_ = index;
      }
      
      at::Tensor src_;
      if (at::functionalization::impl::isFunctionalTensor(src)) {
        at::functionalization::impl::sync(src);
        src_ = at::functionalization::impl::from_functional_tensor(src);
      } else {
        src_ = src;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || index.device().type() == c10::DeviceType::XLA || src.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(index) || at::functionalization::impl::isFunctionalTensor(src))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::scatter__reduce::call(self_, dim, index_, src_, reduce);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::scatter_reduce::call(self_, dim, index_, src_, reduce);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & scatter_out_value_reduce_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto index_meta = to_meta(index);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::scatter_value_reduce_out::call(self_meta, dim, index_meta, value, reduce, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor index_;
      if (at::functionalization::impl::isFunctionalTensor(index)) {
        at::functionalization::impl::sync(index);
        index_ = at::functionalization::impl::from_functional_tensor(index);
      } else {
        index_ = index;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || index.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(index))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::scatter_value_reduce_out::call(self_, dim, index_, value, reduce, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::scatter_value_reduce::call(self_, dim, index_, value, reduce);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & scatter__value_reduce(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto index_meta = to_meta(index);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::scatter__value_reduce::call(self_meta, dim, index_meta, value, reduce);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor index_;
      if (at::functionalization::impl::isFunctionalTensor(index)) {
        at::functionalization::impl::sync(index);
        index_ = at::functionalization::impl::from_functional_tensor(index);
      } else {
        index_ = index;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || index.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(index))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::scatter__value_reduce::call(self_, dim, index_, value, reduce);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::scatter_value_reduce::call(self_, dim, index_, value, reduce);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & scatter_add_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto index_meta = to_meta(index);
        auto src_meta = to_meta(src);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::scatter_add_out::call(self_meta, dim, index_meta, src_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor index_;
      if (at::functionalization::impl::isFunctionalTensor(index)) {
        at::functionalization::impl::sync(index);
        index_ = at::functionalization::impl::from_functional_tensor(index);
      } else {
        index_ = index;
      }
      
      at::Tensor src_;
      if (at::functionalization::impl::isFunctionalTensor(src)) {
        at::functionalization::impl::sync(src);
        src_ = at::functionalization::impl::from_functional_tensor(src);
      } else {
        src_ = src;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || index.device().type() == c10::DeviceType::XLA || src.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(index) || at::functionalization::impl::isFunctionalTensor(src))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::scatter_add_out::call(self_, dim, index_, src_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::scatter_add::call(self_, dim, index_, src_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & scatter_add_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto index_meta = to_meta(index);
        auto src_meta = to_meta(src);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::scatter_add_::call(self_meta, dim, index_meta, src_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor index_;
      if (at::functionalization::impl::isFunctionalTensor(index)) {
        at::functionalization::impl::sync(index);
        index_ = at::functionalization::impl::from_functional_tensor(index);
      } else {
        index_ = index;
      }
      
      at::Tensor src_;
      if (at::functionalization::impl::isFunctionalTensor(src)) {
        at::functionalization::impl::sync(src);
        src_ = at::functionalization::impl::from_functional_tensor(src);
      } else {
        src_ = src;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || index.device().type() == c10::DeviceType::XLA || src.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(index) || at::functionalization::impl::isFunctionalTensor(src))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::scatter_add_::call(self_, dim, index_, src_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::scatter_add::call(self_, dim, index_, src_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
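
    // out= overloads follow the same pattern, with two differences: the
    // meta-tensor reference run is compiled out (the `false &&` guard), since
    // the codegen only enables it for in-place ops today, and the write-back
    // targets `out` instead of `self`. Roughly, a call to
    //
    //   at::_ops::scatter_reduce_two_out::call(self, dim, index, src, reduce, include_self, out)
    //
    // is re-expressed as the out-of-place
    //
    //   at::Tensor tmp = at::_ops::scatter_reduce_two::call(self_, dim, index_, src_, reduce, include_self);
    //
    // followed by the same replace_() / commit_update() write-back, here into `out`.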

    at::Tensor & scatter_reduce_out_two_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto index_meta = to_meta(index);
        auto src_meta = to_meta(src);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::scatter_reduce_two_out::call(self_meta, dim, index_meta, src_meta, reduce, include_self, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor index_;
      if (at::functionalization::impl::isFunctionalTensor(index)) {
        at::functionalization::impl::sync(index);
        index_ = at::functionalization::impl::from_functional_tensor(index);
      } else {
        index_ = index;
      }
      
      at::Tensor src_;
      if (at::functionalization::impl::isFunctionalTensor(src)) {
        at::functionalization::impl::sync(src);
        src_ = at::functionalization::impl::from_functional_tensor(src);
      } else {
        src_ = src;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || index.device().type() == c10::DeviceType::XLA || src.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(index) || at::functionalization::impl::isFunctionalTensor(src))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::scatter_reduce_two_out::call(self_, dim, index_, src_, reduce, include_self, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::scatter_reduce_two::call(self_, dim, index_, src_, reduce, include_self);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
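
    // Two XLA-specific carve-outs show up in every wrapper here. First, the
    // "mutating a non-functional tensor with a functional tensor" assert is
    // skipped whenever one of the inputs lives on an XLA device, because code
    // like cpu_tensor.copy_(xla_tensor) is legitimate even when only some of
    // the tensors involved are functionalized. Second, the pair of
    // from_functional_tensor() calls bracketing the write-back feeds
    // propagate_xla_data_direct(), which is intended to give backends such as
    // XLA a chance to carry data associated with the previous inner tensor
    // over to the updated one.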

    at::Tensor & scatter_reduce__two(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto index_meta = to_meta(index);
        auto src_meta = to_meta(src);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::scatter_reduce__two::call(self_meta, dim, index_meta, src_meta, reduce, include_self);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor index_;
      if (at::functionalization::impl::isFunctionalTensor(index)) {
        at::functionalization::impl::sync(index);
        index_ = at::functionalization::impl::from_functional_tensor(index);
      } else {
        index_ = index;
      }
      
      at::Tensor src_;
      if (at::functionalization::impl::isFunctionalTensor(src)) {
        at::functionalization::impl::sync(src);
        src_ = at::functionalization::impl::from_functional_tensor(src);
      } else {
        src_ = src;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || index.device().type() == c10::DeviceType::XLA || src.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(index) || at::functionalization::impl::isFunctionalTensor(src))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::scatter_reduce__two::call(self_, dim, index_, src_, reduce, include_self);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::scatter_reduce_two::call(self_, dim, index_, src_, reduce, include_self);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & eq_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::eq_Scalar_out::call(self_meta, other, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::eq_Scalar_out::call(self_, other, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::eq_Scalar::call(self_, other);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
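
    // For in-place ops whose only tensor argument is `self` (e.g. eq_.Scalar
    // below), the "mutating a non-functional tensor with a functional tensor"
    // check degenerates: there are no other tensor inputs that could be
    // functional, so the codegen emits the constant-folded condition
    //
    //   if (!(false) && (false)) { ... }
    //
    // which can never be taken, and a non-functional `self` always falls
    // through to the plain redispatch in "case 2".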

    at::Tensor & eq__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::eq__Scalar::call(self_meta, other);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::eq__Scalar::call(self_, other);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::eq_Scalar::call(self_, other);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & eq_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::eq_Tensor_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::eq_Tensor_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::eq_Tensor::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & eq__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::eq__Tensor::call(self_meta, other_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::eq__Tensor::call(self_, other_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::eq_Tensor::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & bitwise_and_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::bitwise_and_Tensor_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::bitwise_and_Tensor_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::bitwise_and_Tensor::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & bitwise_and__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::bitwise_and__Tensor::call(self_meta, other_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::bitwise_and__Tensor::call(self_, other_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::bitwise_and_Tensor::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & bitwise_and_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::bitwise_and_Scalar_out::call(self_meta, other, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::bitwise_and_Scalar_out::call(self_, other, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::bitwise_and_Scalar::call(self_, other);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & bitwise_and__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::bitwise_and__Scalar::call(self_meta, other);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::bitwise_and__Scalar::call(self_, other);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::bitwise_and_Scalar::call(self_, other);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
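
    // In the Scalar_Tensor out= overloads below, `self` is a Scalar rather
    // than a Tensor, so it is passed through untouched: only `other` and `out`
    // are synced/unwrapped, and only `other` participates in the
    // functional-vs-non-functional mutation check. The redispatch is otherwise
    // the same, e.g. (roughly)
    //
    //   at::Tensor tmp = at::_ops::bitwise_and_Scalar_Tensor::call(self, other_);
    //
    // followed by the usual write-back into `out`.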

    at::Tensor & bitwise_and_out_Scalar_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::bitwise_and_Scalar_Tensor_out::call(self, other_meta, out_meta);
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::bitwise_and_Scalar_Tensor_out::call(self, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::bitwise_and_Scalar_Tensor::call(self, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & bitwise_or_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::bitwise_or_Tensor_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::bitwise_or_Tensor_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::bitwise_or_Tensor::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & bitwise_or__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::bitwise_or__Tensor::call(self_meta, other_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::bitwise_or__Tensor::call(self_, other_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::bitwise_or_Tensor::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & bitwise_or_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::bitwise_or_Scalar_out::call(self_meta, other, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::bitwise_or_Scalar_out::call(self_, other, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::bitwise_or_Scalar::call(self_, other);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & bitwise_or__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::bitwise_or__Scalar::call(self_meta, other);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::bitwise_or__Scalar::call(self_, other);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::bitwise_or_Scalar::call(self_, other);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & bitwise_or_out_Scalar_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::bitwise_or_Scalar_Tensor_out::call(self, other_meta, out_meta);
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::bitwise_or_Scalar_Tensor_out::call(self, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::bitwise_or_Scalar_Tensor::call(self, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & bitwise_xor_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::bitwise_xor_Tensor_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::bitwise_xor_Tensor_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::bitwise_xor_Tensor::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & bitwise_xor__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::bitwise_xor__Tensor::call(self_meta, other_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::bitwise_xor__Tensor::call(self_, other_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::bitwise_xor_Tensor::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & bitwise_xor_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::bitwise_xor_Scalar_out::call(self_meta, other, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::bitwise_xor_Scalar_out::call(self_, other, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::bitwise_xor_Scalar::call(self_, other);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
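
    // out= variants follow the same recipe as in-place ops, with `out` playing the role of the
    // mutated tensor: the functional form of the op (without the trailing out argument) produces a
    // fresh result, which is then committed into out's functional wrapper. Condensed:
    //
    //   tmp_output = at::_ops::bitwise_xor_Scalar::call(self_, other);   // functional form
    //   at::functionalization::impl::replace_(out, tmp_output);          // write the result into out
    //   at::functionalization::impl::commit_update(out);
    //   at::functionalization::impl::sync(out);
    //
    // (Condensed from the kernel above; error checking and XLA propagation are elided.)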

    at::Tensor & bitwise_xor__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::bitwise_xor__Scalar::call(self_meta, other);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::bitwise_xor__Scalar::call(self_, other);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::bitwise_xor_Scalar::call(self_, other);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & bitwise_xor_out_Scalar_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::bitwise_xor_Scalar_Tensor_out::call(self, other_meta, out_meta);
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::bitwise_xor_Scalar_Tensor_out::call(self, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::bitwise_xor_Scalar_Tensor::call(self, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
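
    // When the mutated-into op takes a Scalar `self` (as in the Scalar_Tensor overloads above),
    // only the tensor arguments need to be synced and unwrapped; the scalar is forwarded as-is to
    // both the redispatch and the functional call. A condensed view of the kernel above:
    //
    //   // `self` is a Scalar: nothing to unwrap.
    //   at::Tensor other_ = at::functionalization::impl::from_functional_tensor(other);
    //   at::Tensor out_   = at::functionalization::impl::from_functional_tensor(out);
    //   auto tmp = at::_ops::bitwise_xor_Scalar_Tensor::call(self, other_);
    //   at::functionalization::impl::replace_(out, tmp);
    //   at::functionalization::impl::commit_update(out);
    //
    // (Simplified; the real code above only unwraps when isFunctionalTensor() returns true.)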

    at::Tensor & __lshift___out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::__lshift___Scalar_out::call(self_meta, other, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::__lshift___Scalar_out::call(self_, other, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::__lshift___Scalar::call(self_, other);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & __ilshift___Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::__ilshift___Scalar::call(self_meta, other);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::__ilshift___Scalar::call(self_, other);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::__lshift___Scalar::call(self_, other);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & __lshift___out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::__lshift___Tensor_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::__lshift___Tensor_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::__lshift___Tensor::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & __ilshift___Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::__ilshift___Tensor::call(self_meta, other_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::__ilshift___Tensor::call(self_, other_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::__lshift___Tensor::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & bitwise_left_shift_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::bitwise_left_shift_Tensor_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::bitwise_left_shift_Tensor_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::bitwise_left_shift_Tensor::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & bitwise_left_shift__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::bitwise_left_shift__Tensor::call(self_meta, other_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::bitwise_left_shift__Tensor::call(self_, other_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::bitwise_left_shift_Tensor::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & bitwise_left_shift_out_Tensor_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::bitwise_left_shift_Tensor_Scalar_out::call(self_meta, other, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::bitwise_left_shift_Tensor_Scalar_out::call(self_, other, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::bitwise_left_shift_Tensor_Scalar::call(self_, other);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & bitwise_left_shift__Tensor_Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::bitwise_left_shift__Tensor_Scalar::call(self_meta, other);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::bitwise_left_shift__Tensor_Scalar::call(self_, other);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::bitwise_left_shift_Tensor_Scalar::call(self_, other);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & bitwise_left_shift_out_Scalar_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::bitwise_left_shift_Scalar_Tensor_out::call(self, other_meta, out_meta);
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::bitwise_left_shift_Scalar_Tensor_out::call(self, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::bitwise_left_shift_Scalar_Tensor::call(self, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & __rshift___out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::__rshift___Scalar_out::call(self_meta, other, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::__rshift___Scalar_out::call(self_, other, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::__rshift___Scalar::call(self_, other);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & __irshift___Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::__irshift___Scalar::call(self_meta, other);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::__irshift___Scalar::call(self_, other);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::__rshift___Scalar::call(self_, other);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & __rshift___out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::__rshift___Tensor_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::__rshift___Tensor_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::__rshift___Tensor::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & __irshift___Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::__irshift___Tensor::call(self_meta, other_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::__irshift___Tensor::call(self_, other_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::__rshift___Tensor::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & bitwise_right_shift_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::bitwise_right_shift_Tensor_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::bitwise_right_shift_Tensor_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::bitwise_right_shift_Tensor::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & bitwise_right_shift__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::bitwise_right_shift__Tensor::call(self_meta, other_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::bitwise_right_shift__Tensor::call(self_, other_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::bitwise_right_shift_Tensor::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & bitwise_right_shift_out_Tensor_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::bitwise_right_shift_Tensor_Scalar_out::call(self_meta, other, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::bitwise_right_shift_Tensor_Scalar_out::call(self_, other, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::bitwise_right_shift_Tensor_Scalar::call(self_, other);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & bitwise_right_shift__Tensor_Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::bitwise_right_shift__Tensor_Scalar::call(self_meta, other);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::bitwise_right_shift__Tensor_Scalar::call(self_, other);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::bitwise_right_shift_Tensor_Scalar::call(self_, other);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

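    // A minimal sketch of exercising one of the in-place kernels above by hand (illustrative
    // only; everything other than the at::functionalization helpers is an ordinary ATen call):
    //
    //   at::Tensor base = at::zeros({4}, at::kLong);
    //   at::Tensor wrapped = at::functionalization::impl::to_functional_tensor(base);
    //   wrapped.bitwise_right_shift_(1);   // routed to bitwise_right_shift__Tensor_Scalar above
    //   at::functionalization::impl::sync(wrapped);
    //   at::Tensor result = at::functionalization::impl::from_functional_tensor(wrapped);
    //
    // The mutation is replayed through the functional op and written back into `wrapped`;
    // `base` itself is never mutated.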
    at::Tensor & bitwise_right_shift_out_Scalar_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::bitwise_right_shift_Scalar_Tensor_out::call(self, other_meta, out_meta);
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::bitwise_right_shift_Scalar_Tensor_out::call(self, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::bitwise_right_shift_Scalar_Tensor::call(self, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

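    // In the Scalar/Tensor variant above, `other` is the only non-output tensor argument, so
    // the "functional input mutating a non-functional output" check inspects `other`. In
    // kernels whose only tensor argument is the one being mutated (the in-place variants),
    // the same check constant-folds to `!(false) && (false)` and is dead by construction.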
    at::Tensor & tril_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t diagonal, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::tril_out::call(self_meta, diagonal, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::tril_out::call(self_, diagonal, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::tril::call(self_, diagonal);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

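    // Under functionalization, an out= call such as at::tril_out(out, self, diagonal) is
    // rewritten as the pure functional form followed by a write-back into the wrapper for
    // `out`, i.e. roughly:
    //
    //   at::Tensor tmp = at::tril(self, diagonal);   // no mutation
    //   // ... then the replace_/commit_update/sync epilogue on `out`, as in tril_out_out above.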
    at::Tensor & tril_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t diagonal) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::tril_::call(self_meta, diagonal);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::tril_::call(self_, diagonal);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::tril::call(self_, diagonal);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & triu_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t diagonal, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::triu_out::call(self_meta, diagonal, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::triu_out::call(self_, diagonal, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::triu::call(self_, diagonal);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & triu_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t diagonal) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::triu_::call(self_meta, diagonal);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::triu_::call(self_, diagonal);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::triu::call(self_, diagonal);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

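    // Note that the meta-reference pass at the top of each in-place kernel discards its
    // result; it exists only to surface shape/dtype errors eagerly, before the mutation is
    // rewritten into its functional form.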
    at::Tensor & digamma_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::digamma_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::digamma_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::digamma::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & digamma_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::digamma_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::digamma_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::digamma::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & lerp_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto end_meta = to_meta(end);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::lerp_Scalar_out::call(self_meta, end_meta, weight, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor end_;
      if (at::functionalization::impl::isFunctionalTensor(end)) {
        at::functionalization::impl::sync(end);
        end_ = at::functionalization::impl::from_functional_tensor(end);
      } else {
        end_ = end;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || end.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(end))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::lerp_Scalar_out::call(self_, end_, weight, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::lerp_Scalar::call(self_, end_, weight);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

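    // For kernels with additional read-only tensor arguments (`end`, and `weight` in the
    // Tensor overloads below), those arguments are only synced and unwrapped before the
    // redispatch; the write-back epilogue touches nothing but the mutated tensor.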
    at::Tensor & lerp__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto end_meta = to_meta(end);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::lerp__Scalar::call(self_meta, end_meta, weight);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor end_;
      if (at::functionalization::impl::isFunctionalTensor(end)) {
        at::functionalization::impl::sync(end);
        end_ = at::functionalization::impl::from_functional_tensor(end);
      } else {
        end_ = end;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || end.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(end))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::lerp__Scalar::call(self_, end_, weight);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::lerp_Scalar::call(self_, end_, weight);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & lerp_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto end_meta = to_meta(end);
        auto weight_meta = to_meta(weight);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::lerp_Tensor_out::call(self_meta, end_meta, weight_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor end_;
      if (at::functionalization::impl::isFunctionalTensor(end)) {
        at::functionalization::impl::sync(end);
        end_ = at::functionalization::impl::from_functional_tensor(end);
      } else {
        end_ = end;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || end.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(end) || at::functionalization::impl::isFunctionalTensor(weight))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::lerp_Tensor_out::call(self_, end_, weight_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::lerp_Tensor::call(self_, end_, weight_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & lerp__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto end_meta = to_meta(end);
        auto weight_meta = to_meta(weight);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::lerp__Tensor::call(self_meta, end_meta, weight_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor end_;
      if (at::functionalization::impl::isFunctionalTensor(end)) {
        at::functionalization::impl::sync(end);
        end_ = at::functionalization::impl::from_functional_tensor(end);
      } else {
        end_ = end;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || end.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(end) || at::functionalization::impl::isFunctionalTensor(weight))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::lerp__Tensor::call(self_, end_, weight_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::lerp_Tensor::call(self_, end_, weight_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & addbmm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto batch1_meta = to_meta(batch1);
        auto batch2_meta = to_meta(batch2);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::addbmm_out::call(self_meta, batch1_meta, batch2_meta, beta, alpha, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor batch1_;
      if (at::functionalization::impl::isFunctionalTensor(batch1)) {
        at::functionalization::impl::sync(batch1);
        batch1_ = at::functionalization::impl::from_functional_tensor(batch1);
      } else {
        batch1_ = batch1;
      }
      
      at::Tensor batch2_;
      if (at::functionalization::impl::isFunctionalTensor(batch2)) {
        at::functionalization::impl::sync(batch2);
        batch2_ = at::functionalization::impl::from_functional_tensor(batch2);
      } else {
        batch2_ = batch2;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || batch1.device().type() == c10::DeviceType::XLA || batch2.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(batch1) || at::functionalization::impl::isFunctionalTensor(batch2))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::addbmm_out::call(self_, batch1_, batch2_, beta, alpha, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::addbmm::call(self_, batch1_, batch2_, beta, alpha);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & addbmm_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto batch1_meta = to_meta(batch1);
        auto batch2_meta = to_meta(batch2);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::addbmm_::call(self_meta, batch1_meta, batch2_meta, beta, alpha);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor batch1_;
      if (at::functionalization::impl::isFunctionalTensor(batch1)) {
        at::functionalization::impl::sync(batch1);
        batch1_ = at::functionalization::impl::from_functional_tensor(batch1);
      } else {
        batch1_ = batch1;
      }
      
      at::Tensor batch2_;
      if (at::functionalization::impl::isFunctionalTensor(batch2)) {
        at::functionalization::impl::sync(batch2);
        batch2_ = at::functionalization::impl::from_functional_tensor(batch2);
      } else {
        batch2_ = batch2;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || batch1.device().type() == c10::DeviceType::XLA || batch2.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(batch1) || at::functionalization::impl::isFunctionalTensor(batch2))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::addbmm_::call(self_, batch1_, batch2_, beta, alpha);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::addbmm::call(self_, batch1_, batch2_, beta, alpha);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

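    // The random_/uniform_ family below has no conventional out-of-place user API; the replay
    // goes through the codegen'd functional counterparts (at::_ops::random_from,
    // at::_ops::random_to, at::_ops::random, at::_ops::uniform), which exist primarily so
    // these kernels have a pure op to call.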
    at::Tensor & random_out_from_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t from, ::std::optional<int64_t> to, ::std::optional<at::Generator> generator, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::random_from_out::call(self_meta, from, to, generator, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::random_from_out::call(self_, from, to, generator, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::random_from::call(self_, from, to, generator);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & random__from(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t from, ::std::optional<int64_t> to, ::std::optional<at::Generator> generator) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::random__from::call(self_meta, from, to, generator);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::random__from::call(self_, from, to, generator);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::random_from::call(self_, from, to, generator);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & random_out_to_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t to, ::std::optional<at::Generator> generator, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::random_to_out::call(self_meta, to, generator, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::random_to_out::call(self_, to, generator, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::random_to::call(self_, to, generator);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & random__to(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t to, ::std::optional<at::Generator> generator) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::random__to::call(self_meta, to, generator);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::random__to::call(self_, to, generator);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::random_to::call(self_, to, generator);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & random_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::Generator> generator, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::random_out::call(self_meta, generator, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::random_out::call(self_, generator, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::random::call(self_, generator);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & random_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, ::std::optional<at::Generator> generator) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::random_::call(self_meta, generator);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::random_::call(self_, generator);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::random::call(self_, generator);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & uniform_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double from, double to, ::std::optional<at::Generator> generator, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::uniform_out::call(self_meta, from, to, generator, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::uniform_out::call(self_, from, to, generator, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::uniform::call(self_, from, to, generator);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
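
    // out= overloads such as uniform_out_out above treat only `out` as mutated: the kernel calls
    // the purely functional op (at::_ops::uniform::call) on the unwrapped inputs and commits the
    // temporary into `out`'s wrapper. Roughly, what a traced program observes (illustrative only):
    //
    //   // eager:           at::_ops::uniform_out::call(self, from, to, generator, out);
    //   // functionalized:  at::Tensor tmp = at::_ops::uniform::call(self, from, to, generator);
    //   //                  // `out` is then repointed at tmp via replace_/commit_update,
    //   //                  // so no real in-place write to `out`'s storage happens.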

    at::Tensor & uniform_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double from, double to, ::std::optional<at::Generator> generator) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::uniform_::call(self_meta, from, to, generator);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::uniform_::call(self_, from, to, generator);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::uniform::call(self_, from, to, generator);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & cauchy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double median, double sigma, ::std::optional<at::Generator> generator, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::cauchy_out::call(self_meta, median, sigma, generator, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::cauchy_out::call(self_, median, sigma, generator, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::cauchy::call(self_, median, sigma, generator);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & cauchy_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double median, double sigma, ::std::optional<at::Generator> generator) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::cauchy_::call(self_meta, median, sigma, generator);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::cauchy_::call(self_, median, sigma, generator);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::cauchy::call(self_, median, sigma, generator);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & log_normal_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double mean, double std, ::std::optional<at::Generator> generator, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::log_normal_out::call(self_meta, mean, std, generator, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::log_normal_out::call(self_, mean, std, generator, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::log_normal::call(self_, mean, std, generator);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & log_normal_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double mean, double std, ::std::optional<at::Generator> generator) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::log_normal_::call(self_meta, mean, std, generator);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::log_normal_::call(self_, mean, std, generator);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::log_normal::call(self_, mean, std, generator);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & exponential_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double lambd, ::std::optional<at::Generator> generator, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::exponential_out::call(self_meta, lambd, generator, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::exponential_out::call(self_, lambd, generator, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::exponential::call(self_, lambd, generator);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & exponential_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double lambd, ::std::optional<at::Generator> generator) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::exponential_::call(self_meta, lambd, generator);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::exponential_::call(self_, lambd, generator);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::exponential::call(self_, lambd, generator);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & geometric_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, ::std::optional<at::Generator> generator, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::geometric_out::call(self_meta, p, generator, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::geometric_out::call(self_, p, generator, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::geometric::call(self_, p, generator);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & geometric_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double p, ::std::optional<at::Generator> generator) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::geometric_::call(self_meta, p, generator);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::geometric_::call(self_, p, generator);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::geometric::call(self_, p, generator);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & diag_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t diagonal, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::diag_out::call(self_meta, diagonal, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::diag_out::call(self_, diagonal, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::diag::call(self_, diagonal);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & cross_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, ::std::optional<int64_t> dim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::cross_out::call(self_meta, other_meta, dim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::cross_out::call(self_, other_, dim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::cross::call(self_, other_, dim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
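
    // When an out= op also takes read-only tensor inputs (here `self` and `other`), the guard in
    // cross_out_out distinguishes two mixed cases: a plain `out` receiving data from a functional
    // input is a hard error (unless an input lives on an XLA device, where such mixing is
    // deliberately tolerated), while the all-non-functional case simply redispatches unchanged.
    // A rough restatement of the error condition, with impl:: abbreviating
    // at::functionalization::impl (illustrative only):
    //
    //   bool any_input_functional = impl::isFunctionalTensor(self) || impl::isFunctionalTensor(other);
    //   bool any_input_xla        = self.device().type() == c10::DeviceType::XLA
    //                            || other.device().type() == c10::DeviceType::XLA;
    //   if (!impl::isFunctionalTensor(out) && any_input_functional && !any_input_xla) {
    //     TORCH_INTERNAL_ASSERT(false, "mutating a non-functional tensor with a functional tensor is not allowed. ...");
    //   }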

    at::Tensor & tril_indices_out_out(c10::DispatchKeySet dispatchKeySet, int64_t row, int64_t col, int64_t offset, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::tril_indices_out::call(row, col, offset, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::tril_indices_out::call(row, col, offset, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::tril_indices::call(row, col, offset, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
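
    // tril_indices / triu_indices take no tensor inputs, so their out= functionalization calls the
    // factory variant and rebuilds the TensorOptions from the existing `out_` (scalar_type, layout,
    // device; pin_memory is passed as ::std::nullopt) before committing the result into `out`'s
    // wrapper, exactly like the other out= kernels above.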

    at::Tensor & triu_indices_out_out(c10::DispatchKeySet dispatchKeySet, int64_t row, int64_t col, int64_t offset, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::triu_indices_out::call(row, col, offset, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::triu_indices_out::call(row, col, offset, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::triu_indices::call(row, col, offset, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & trace_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::trace_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::trace_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::trace::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & ne_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::ne_Scalar_out::call(self_meta, other, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::ne_Scalar_out::call(self_, other, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::ne_Scalar::call(self_, other);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & ne__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::ne__Scalar::call(self_meta, other);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::ne__Scalar::call(self_, other);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::ne_Scalar::call(self_, other);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & ne_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::ne_Tensor_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::ne_Tensor_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::ne_Tensor::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & ne__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::ne__Tensor::call(self_meta, other_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::ne__Tensor::call(self_, other_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::ne_Tensor::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
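
    // For in-place binary ops such as ne__Tensor, the mutation target is `self` and the guard
    // inspects the remaining tensor input: a functional `other` flowing into a non-functional
    // `self` is rejected (again with the XLA-device carve-out). Under functionalization the
    // mutation is rewritten roughly as (illustrative only):
    //
    //   // a.ne_(b)   ==>   at::Tensor tmp = at::_ops::ne_Tensor::call(a_, b_);
    //   //                  // a's wrapper is then repointed at tmp via replace_/commit_update.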

    at::Tensor & not_equal_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::not_equal_Scalar_out::call(self_meta, other, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::not_equal_Scalar_out::call(self_, other, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::not_equal_Scalar::call(self_, other);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & not_equal__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::not_equal__Scalar::call(self_meta, other);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::not_equal__Scalar::call(self_, other);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::not_equal_Scalar::call(self_, other);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
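
    // The in-place (foo_) kernels differ from the out= kernels only in which
    // argument they commit into: the result of the functional variant is written
    // back into `self` rather than `out`. A short sketch under the same assumptions
    // as the example above (illustrative only):
    //
    //   at::Tensor x_f = at::functionalization::impl::to_functional_tensor(at::zeros({3}));
    //   {
    //     c10::impl::IncludeDispatchKeyGuard guard(c10::DispatchKey::Functionalize);
    //     // Routed to not_equal__Scalar above; x_f's wrapper is updated, its storage is not mutated.
    //     x_f.not_equal_(0);
    //   }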

    at::Tensor & not_equal_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::not_equal_Tensor_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::not_equal_Tensor_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::not_equal_Tensor::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & not_equal__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::not_equal__Tensor::call(self_meta, other_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::not_equal__Tensor::call(self_, other_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::not_equal_Tensor::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & ge_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::ge_Scalar_out::call(self_meta, other, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::ge_Scalar_out::call(self_, other, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::ge_Scalar::call(self_, other);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & ge__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::ge__Scalar::call(self_meta, other);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::ge__Scalar::call(self_, other);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::ge_Scalar::call(self_, other);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & ge_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::ge_Tensor_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::ge_Tensor_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::ge_Tensor::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & ge__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::ge__Tensor::call(self_meta, other_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::ge__Tensor::call(self_, other_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::ge_Tensor::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & greater_equal_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::greater_equal_Scalar_out::call(self_meta, other, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::greater_equal_Scalar_out::call(self_, other, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::greater_equal_Scalar::call(self_, other);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & greater_equal__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::greater_equal__Scalar::call(self_meta, other);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::greater_equal__Scalar::call(self_, other);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::greater_equal_Scalar::call(self_, other);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & greater_equal_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::greater_equal_Tensor_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::greater_equal_Tensor_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::greater_equal_Tensor::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & greater_equal__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::greater_equal__Tensor::call(self_meta, other_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::greater_equal__Tensor::call(self_, other_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::greater_equal_Tensor::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & le_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::le_Scalar_out::call(self_meta, other, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::le_Scalar_out::call(self_, other, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::le_Scalar::call(self_, other);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & le__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::le__Scalar::call(self_meta, other);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::le__Scalar::call(self_, other);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::le_Scalar::call(self_, other);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & le_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::le_Tensor_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::le_Tensor_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::le_Tensor::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & le__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::le__Tensor::call(self_meta, other_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::le__Tensor::call(self_, other_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::le_Tensor::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & less_equal_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::less_equal_Scalar_out::call(self_meta, other, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::less_equal_Scalar_out::call(self_, other, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::less_equal_Scalar::call(self_, other);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & less_equal__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::less_equal__Scalar::call(self_meta, other);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::less_equal__Scalar::call(self_, other);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::less_equal_Scalar::call(self_, other);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & less_equal_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::less_equal_Tensor_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::less_equal_Tensor_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::less_equal_Tensor::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & less_equal__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::less_equal__Tensor::call(self_meta, other_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::less_equal__Tensor::call(self_, other_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::less_equal_Tensor::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & gt_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::gt_Scalar_out::call(self_meta, other, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::gt_Scalar_out::call(self_, other, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::gt_Scalar::call(self_, other);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & gt__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::gt__Scalar::call(self_meta, other);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::gt__Scalar::call(self_, other);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::gt_Scalar::call(self_, other);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
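
    // [Editorial sketch; not part of the @generated output] A hedged illustration of how a
    // kernel such as gt__Scalar above gets reached. The tensor names and values below are
    // assumptions for illustration only; the impl:: helpers are the same ones used throughout
    // this file (declared in ATen/FunctionalTensorWrapper.h, already included).
    //
    //   at::Tensor base = at::zeros({4});
    //   // Wrap `base`; the wrapper carries the Functionalize dispatch key.
    //   at::Tensor wrapped = at::functionalization::impl::to_functional_tensor(base);
    //   wrapped.gt_(0.5);  // dispatches to gt__Scalar: gt.Scalar runs out-of-place, then replace_/commit_update
    //   at::functionalization::impl::sync(wrapped);
    //   at::Tensor result = at::functionalization::impl::from_functional_tensor(wrapped);
    //   // `base` itself is never written in place; the updated value lives behind the wrapper.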

    at::Tensor & gt_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::gt_Tensor_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::gt_Tensor_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::gt_Tensor::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
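
    // [Editorial sketch; not part of the @generated output] Every out= kernel above
    // (gt_out_Scalar_out, gt_out_Tensor_out, ...) follows the same shape when `out` is a
    // functional tensor; condensed here purely as a reading aid, restating the generated logic:
    //
    //   {
    //     at::AutoDispatchSkipFunctionalize guard;               // redispatch below this pass
    //     tmp_output = at::_ops::gt_Tensor::call(self_, other_); // call the functional variant, never the out= op
    //   }
    //   at::functionalization::impl::replace_(out, tmp_output);  // repoint `out` at the new value
    //   at::functionalization::impl::commit_update(out);         // record the mutation on the wrapper
    //   at::functionalization::impl::sync(out);                  // apply any pending updates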

    at::Tensor & gt__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::gt__Tensor::call(self_meta, other_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::gt__Tensor::call(self_, other_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::gt_Tensor::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
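
    // [Editorial sketch; not part of the @generated output] The `if (true && !disable_meta_reference())`
    // preamble in the in-place kernels above does a reference run on meta tensors so that shape
    // errors surface before the op is rewritten to its functional form. Condensed, assuming the
    // file-local to_meta() helper (defined earlier in this file) produces meta copies of the inputs:
    //
    //   auto self_meta = to_meta(self);    // meta tensor: carries sizes/strides/dtype, no data
    //   auto other_meta = to_meta(other);
    //   at::AutoDispatchSkipFunctionalize func_guard;
    //   c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
    //   at::_ops::gt__Tensor::call(self_meta, other_meta);  // throws here if the argument shapes are incompatible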

    at::Tensor & greater_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::greater_Scalar_out::call(self_meta, other, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::greater_Scalar_out::call(self_, other, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::greater_Scalar::call(self_, other);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & greater__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::greater__Scalar::call(self_meta, other);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::greater__Scalar::call(self_, other);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::greater_Scalar::call(self_, other);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & greater_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::greater_Tensor_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::greater_Tensor_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::greater_Tensor::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & greater__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::greater__Tensor::call(self_meta, other_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::greater__Tensor::call(self_, other_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::greater_Tensor::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & lt_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::lt_Scalar_out::call(self_meta, other, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::lt_Scalar_out::call(self_, other, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::lt_Scalar::call(self_, other);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & lt__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::lt__Scalar::call(self_meta, other);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::lt__Scalar::call(self_, other);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::lt_Scalar::call(self_, other);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & lt_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::lt_Tensor_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::lt_Tensor_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::lt_Tensor::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & lt__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::lt__Tensor::call(self_meta, other_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::lt__Tensor::call(self_, other_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::lt_Tensor::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & less_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::less_Scalar_out::call(self_meta, other, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::less_Scalar_out::call(self_, other, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::less_Scalar::call(self_, other);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & less__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::less__Scalar::call(self_meta, other);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::less__Scalar::call(self_, other);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::less_Scalar::call(self_, other);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & less_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::less_Tensor_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::less_Tensor_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::less_Tensor::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & less__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::less__Tensor::call(self_meta, other_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::less__Tensor::call(self_, other_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::less_Tensor::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
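
    // [Editorial sketch; not part of the @generated output] Two XLA-related details recur in the
    // kernels above. First, the "case 1" error is suppressed when any argument is on an XLA
    // device, so that code like
    //
    //   cpu_tensor.copy_(xla_tensor);   // mutating a CPU tensor from an XLA tensor stays valid
    //
    // keeps working even though the XLA tensor may be functionalized. Second, after the functional
    // result is committed, propagate_xla_data_direct(inner, inner_updated) passes the pre- and
    // post-update inner tensors to the backend so XLA-side metadata can be carried over; this
    // reading of the call is editorial, not text from the generator.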

    at::Tensor & take_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & index, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto index_meta = to_meta(index);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::take_out::call(self_meta, index_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor index_;
      if (at::functionalization::impl::isFunctionalTensor(index)) {
        at::functionalization::impl::sync(index);
        index_ = at::functionalization::impl::from_functional_tensor(index);
      } else {
        index_ = index;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || index.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(index))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::take_out::call(self_, index_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::take::call(self_, index_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & take_along_dim_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, ::std::optional<int64_t> dim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto indices_meta = to_meta(indices);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::take_along_dim_out::call(self_meta, indices_meta, dim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || indices.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(indices))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::take_along_dim_out::call(self_, indices_, dim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::take_along_dim::call(self_, indices_, dim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
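
    // [Editorial sketch; not part of the @generated output] What the "case 1" branch above guards
    // against, as hypothetical user-level C++ (tensor names and values are assumptions):
    //
    //   at::Tensor self_f = at::functionalization::impl::to_functional_tensor(at::randn({4}));
    //   at::Tensor index  = at::tensor({0, 1});
    //   at::Tensor out    = at::empty({2});     // plain tensor, never wrapped
    //   at::take_outf(self_f, index, out);      // functional input mutating a non-functional `out`
    //                                           // -> the TORCH_INTERNAL_ASSERT in take_out_out fires
    //
    // In other words, once inputs are functionalized, every tensor an op mutates must be
    // functionalized as well (normally ensured by wrapping all inputs via functionalize()).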

    at::Tensor & index_select_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto index_meta = to_meta(index);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::index_select_out::call(self_meta, dim, index_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor index_;
      if (at::functionalization::impl::isFunctionalTensor(index)) {
        at::functionalization::impl::sync(index);
        index_ = at::functionalization::impl::from_functional_tensor(index);
      } else {
        index_ = index;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || index.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(index))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::index_select_out::call(self_, dim, index_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::index_select::call(self_, dim, index_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & index_select_out_dimname_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto index_meta = to_meta(index);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::index_select_dimname_out::call(self_meta, dim, index_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor index_;
      if (at::functionalization::impl::isFunctionalTensor(index)) {
        at::functionalization::impl::sync(index);
        index_ = at::functionalization::impl::from_functional_tensor(index);
      } else {
        index_ = index;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || index.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(index))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::index_select_dimname_out::call(self_, dim, index_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::index_select_dimname::call(self_, dim, index_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & masked_select_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto mask_meta = to_meta(mask);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::masked_select_out::call(self_meta, mask_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor mask_;
      if (at::functionalization::impl::isFunctionalTensor(mask)) {
        at::functionalization::impl::sync(mask);
        mask_ = at::functionalization::impl::from_functional_tensor(mask);
      } else {
        mask_ = mask;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || mask.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(mask))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::masked_select_out::call(self_, mask_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::masked_select::call(self_, mask_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & nonzero_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::nonzero_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::nonzero_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::nonzero::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & nonzero_static_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt size, int64_t fill_value, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::nonzero_static_out::call(self_meta, size, fill_value, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::nonzero_static_out::call(self_, size, fill_value, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::nonzero_static::call(self_, size, fill_value);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & gather_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto index_meta = to_meta(index);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::gather_out::call(self_meta, dim, index_meta, sparse_grad, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor index_;
      if (at::functionalization::impl::isFunctionalTensor(index)) {
        at::functionalization::impl::sync(index);
        index_ = at::functionalization::impl::from_functional_tensor(index);
      } else {
        index_ = index;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || index.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(index))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::gather_out::call(self_, dim, index_, sparse_grad, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::gather::call(self_, dim, index_, sparse_grad);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & gather_out_dimname_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto index_meta = to_meta(index);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::gather_dimname_out::call(self_meta, dim, index_meta, sparse_grad, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor index_;
      if (at::functionalization::impl::isFunctionalTensor(index)) {
        at::functionalization::impl::sync(index);
        index_ = at::functionalization::impl::from_functional_tensor(index);
      } else {
        index_ = index;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || index.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(index))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::gather_dimname_out::call(self_, dim, index_, sparse_grad, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::gather_dimname::call(self_, dim, index_, sparse_grad);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & addcmul_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto tensor1_meta = to_meta(tensor1);
        auto tensor2_meta = to_meta(tensor2);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::addcmul_out::call(self_meta, tensor1_meta, tensor2_meta, value, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor tensor1_;
      if (at::functionalization::impl::isFunctionalTensor(tensor1)) {
        at::functionalization::impl::sync(tensor1);
        tensor1_ = at::functionalization::impl::from_functional_tensor(tensor1);
      } else {
        tensor1_ = tensor1;
      }
      
      at::Tensor tensor2_;
      if (at::functionalization::impl::isFunctionalTensor(tensor2)) {
        at::functionalization::impl::sync(tensor2);
        tensor2_ = at::functionalization::impl::from_functional_tensor(tensor2);
      } else {
        tensor2_ = tensor2;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || tensor1.device().type() == c10::DeviceType::XLA || tensor2.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(tensor1) || at::functionalization::impl::isFunctionalTensor(tensor2))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::addcmul_out::call(self_, tensor1_, tensor2_, value, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::addcmul::call(self_, tensor1_, tensor2_, value);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & addcmul_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto tensor1_meta = to_meta(tensor1);
        auto tensor2_meta = to_meta(tensor2);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::addcmul_::call(self_meta, tensor1_meta, tensor2_meta, value);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor tensor1_;
      if (at::functionalization::impl::isFunctionalTensor(tensor1)) {
        at::functionalization::impl::sync(tensor1);
        tensor1_ = at::functionalization::impl::from_functional_tensor(tensor1);
      } else {
        tensor1_ = tensor1;
      }
      
      at::Tensor tensor2_;
      if (at::functionalization::impl::isFunctionalTensor(tensor2)) {
        at::functionalization::impl::sync(tensor2);
        tensor2_ = at::functionalization::impl::from_functional_tensor(tensor2);
      } else {
        tensor2_ = tensor2;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || tensor1.device().type() == c10::DeviceType::XLA || tensor2.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(tensor1) || at::functionalization::impl::isFunctionalTensor(tensor2))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::addcmul_::call(self_, tensor1_, tensor2_, value);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::addcmul::call(self_, tensor1_, tensor2_, value);
        }
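        // In-place variant: the out-of-place addcmul result is folded back
        // into the functional wrapper of `self`, mirroring the write-back
        // sequence used by the out= overloads above.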
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & addcdiv_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto tensor1_meta = to_meta(tensor1);
        auto tensor2_meta = to_meta(tensor2);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::addcdiv_out::call(self_meta, tensor1_meta, tensor2_meta, value, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor tensor1_;
      if (at::functionalization::impl::isFunctionalTensor(tensor1)) {
        at::functionalization::impl::sync(tensor1);
        tensor1_ = at::functionalization::impl::from_functional_tensor(tensor1);
      } else {
        tensor1_ = tensor1;
      }
      
      at::Tensor tensor2_;
      if (at::functionalization::impl::isFunctionalTensor(tensor2)) {
        at::functionalization::impl::sync(tensor2);
        tensor2_ = at::functionalization::impl::from_functional_tensor(tensor2);
      } else {
        tensor2_ = tensor2;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || tensor1.device().type() == c10::DeviceType::XLA || tensor2.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(tensor1) || at::functionalization::impl::isFunctionalTensor(tensor2))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::addcdiv_out::call(self_, tensor1_, tensor2_, value, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::addcdiv::call(self_, tensor1_, tensor2_, value);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & addcdiv_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto tensor1_meta = to_meta(tensor1);
        auto tensor2_meta = to_meta(tensor2);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::addcdiv_::call(self_meta, tensor1_meta, tensor2_meta, value);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor tensor1_;
      if (at::functionalization::impl::isFunctionalTensor(tensor1)) {
        at::functionalization::impl::sync(tensor1);
        tensor1_ = at::functionalization::impl::from_functional_tensor(tensor1);
      } else {
        tensor1_ = tensor1;
      }
      
      at::Tensor tensor2_;
      if (at::functionalization::impl::isFunctionalTensor(tensor2)) {
        at::functionalization::impl::sync(tensor2);
        tensor2_ = at::functionalization::impl::from_functional_tensor(tensor2);
      } else {
        tensor2_ = tensor2;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || tensor1.device().type() == c10::DeviceType::XLA || tensor2.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(tensor1) || at::functionalization::impl::isFunctionalTensor(tensor2))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::addcdiv_::call(self_, tensor1_, tensor2_, value);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::addcdiv::call(self_, tensor1_, tensor2_, value);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> triangular_solve_out_X(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular, at::Tensor & X, at::Tensor & M) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto A_meta = to_meta(A);
        auto X_meta = to_meta(X);
        auto M_meta = to_meta(M);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::triangular_solve_X::call(self_meta, A_meta, upper, transpose, unitriangular, X_meta, M_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor A_;
      if (at::functionalization::impl::isFunctionalTensor(A)) {
        at::functionalization::impl::sync(A);
        A_ = at::functionalization::impl::from_functional_tensor(A);
      } else {
        A_ = A;
      }
      
      at::Tensor X_;
      if (at::functionalization::impl::isFunctionalTensor(X)) {
        at::functionalization::impl::sync(X);
        X_ = at::functionalization::impl::from_functional_tensor(X);
      } else {
        X_ = X;
      }
      
      at::Tensor M_;
      if (at::functionalization::impl::isFunctionalTensor(M)) {
        at::functionalization::impl::sync(M);
        M_ = at::functionalization::impl::from_functional_tensor(M);
      } else {
        M_ = M;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(X) && at::functionalization::impl::isFunctionalTensor(M))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || A.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(A))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::triangular_solve_X::call(self_, A_, upper, transpose, unitriangular, X_, M_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(X, M);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::triangular_solve::call(self_, A_, upper, transpose, unitriangular);
        }
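        // Multi-output variant: each element of the result tuple is written
        // back into its corresponding mutable output (X first, then M).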
        auto X_inner = at::functionalization::impl::from_functional_tensor(X);
        at::functionalization::impl::replace_(X, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(X);
        at::functionalization::impl::sync(X);
        auto X_inner_updated = at::functionalization::impl::from_functional_tensor(X);
        at::functionalization::impl::propagate_xla_data_direct(X_inner, X_inner_updated);
        auto M_inner = at::functionalization::impl::from_functional_tensor(M);
        at::functionalization::impl::replace_(M, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(M);
        at::functionalization::impl::sync(M);
        auto M_inner_updated = at::functionalization::impl::from_functional_tensor(M);
        at::functionalization::impl::propagate_xla_data_direct(M_inner, M_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(X, M);
      }
    }

    at::Tensor & linalg_solve_triangular_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto B_meta = to_meta(B);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_solve_triangular_out::call(self_meta, B_meta, upper, left, unitriangular, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor B_;
      if (at::functionalization::impl::isFunctionalTensor(B)) {
        at::functionalization::impl::sync(B);
        B_ = at::functionalization::impl::from_functional_tensor(B);
      } else {
        B_ = B;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || B.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(B))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::linalg_solve_triangular_out::call(self_, B_, upper, left, unitriangular, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_solve_triangular::call(self_, B_, upper, left, unitriangular);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> svd_out_U(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool some, bool compute_uv, at::Tensor & U, at::Tensor & S, at::Tensor & V) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto U_meta = to_meta(U);
        auto S_meta = to_meta(S);
        auto V_meta = to_meta(V);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::svd_U::call(self_meta, some, compute_uv, U_meta, S_meta, V_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor U_;
      if (at::functionalization::impl::isFunctionalTensor(U)) {
        at::functionalization::impl::sync(U);
        U_ = at::functionalization::impl::from_functional_tensor(U);
      } else {
        U_ = U;
      }
      
      at::Tensor S_;
      if (at::functionalization::impl::isFunctionalTensor(S)) {
        at::functionalization::impl::sync(S);
        S_ = at::functionalization::impl::from_functional_tensor(S);
      } else {
        S_ = S;
      }
      
      at::Tensor V_;
      if (at::functionalization::impl::isFunctionalTensor(V)) {
        at::functionalization::impl::sync(V);
        V_ = at::functionalization::impl::from_functional_tensor(V);
      } else {
        V_ = V;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(U) && at::functionalization::impl::isFunctionalTensor(S) && at::functionalization::impl::isFunctionalTensor(V))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::svd_U::call(self_, some, compute_uv, U_, S_, V_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(U, S, V);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::svd::call(self_, some, compute_uv);
        }
        auto U_inner = at::functionalization::impl::from_functional_tensor(U);
        at::functionalization::impl::replace_(U, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(U);
        at::functionalization::impl::sync(U);
        auto U_inner_updated = at::functionalization::impl::from_functional_tensor(U);
        at::functionalization::impl::propagate_xla_data_direct(U_inner, U_inner_updated);
        auto S_inner = at::functionalization::impl::from_functional_tensor(S);
        at::functionalization::impl::replace_(S, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(S);
        at::functionalization::impl::sync(S);
        auto S_inner_updated = at::functionalization::impl::from_functional_tensor(S);
        at::functionalization::impl::propagate_xla_data_direct(S_inner, S_inner_updated);
        auto V_inner = at::functionalization::impl::from_functional_tensor(V);
        at::functionalization::impl::replace_(V, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(V);
        at::functionalization::impl::sync(V);
        auto V_inner_updated = at::functionalization::impl::from_functional_tensor(V);
        at::functionalization::impl::propagate_xla_data_direct(V_inner, V_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(U, S, V);
      }
    }

    at::Tensor & cholesky_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::cholesky_out::call(self_meta, upper, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::cholesky_out::call(self_, upper, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::cholesky::call(self_, upper);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & cholesky_solve_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & input2, bool upper, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto input2_meta = to_meta(input2);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::cholesky_solve_out::call(self_meta, input2_meta, upper, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor input2_;
      if (at::functionalization::impl::isFunctionalTensor(input2)) {
        at::functionalization::impl::sync(input2);
        input2_ = at::functionalization::impl::from_functional_tensor(input2);
      } else {
        input2_ = input2;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || input2.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(input2))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::cholesky_solve_out::call(self_, input2_, upper, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::cholesky_solve::call(self_, input2_, upper);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _cholesky_solve_helper_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & A, bool upper, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto A_meta = to_meta(A);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_cholesky_solve_helper_out::call(self_meta, A_meta, upper, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor A_;
      if (at::functionalization::impl::isFunctionalTensor(A)) {
        at::functionalization::impl::sync(A);
        A_ = at::functionalization::impl::from_functional_tensor(A);
      } else {
        A_ = A;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || A.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(A))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_cholesky_solve_helper_out::call(self_, A_, upper, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_cholesky_solve_helper::call(self_, A_, upper);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & cholesky_inverse_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::cholesky_inverse_out::call(self_meta, upper, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::cholesky_inverse_out::call(self_, upper, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::cholesky_inverse::call(self_, upper);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> qr_out_Q(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool some, at::Tensor & Q, at::Tensor & R) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto Q_meta = to_meta(Q);
        auto R_meta = to_meta(R);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::qr_Q::call(self_meta, some, Q_meta, R_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor Q_;
      if (at::functionalization::impl::isFunctionalTensor(Q)) {
        at::functionalization::impl::sync(Q);
        Q_ = at::functionalization::impl::from_functional_tensor(Q);
      } else {
        Q_ = Q;
      }
      
      at::Tensor R_;
      if (at::functionalization::impl::isFunctionalTensor(R)) {
        at::functionalization::impl::sync(R);
        R_ = at::functionalization::impl::from_functional_tensor(R);
      } else {
        R_ = R;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(Q) && at::functionalization::impl::isFunctionalTensor(R))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::qr_Q::call(self_, some, Q_, R_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(Q, R);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::qr::call(self_, some);
        }
        auto Q_inner = at::functionalization::impl::from_functional_tensor(Q);
        at::functionalization::impl::replace_(Q, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(Q);
        at::functionalization::impl::sync(Q);
        auto Q_inner_updated = at::functionalization::impl::from_functional_tensor(Q);
        at::functionalization::impl::propagate_xla_data_direct(Q_inner, Q_inner_updated);
        auto R_inner = at::functionalization::impl::from_functional_tensor(R);
        at::functionalization::impl::replace_(R, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(R);
        at::functionalization::impl::sync(R);
        auto R_inner_updated = at::functionalization::impl::from_functional_tensor(R);
        at::functionalization::impl::propagate_xla_data_direct(R_inner, R_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(Q, R);
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> geqrf_out_a(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & a, at::Tensor & tau) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto a_meta = to_meta(a);
        auto tau_meta = to_meta(tau);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::geqrf_a::call(self_meta, a_meta, tau_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor a_;
      if (at::functionalization::impl::isFunctionalTensor(a)) {
        at::functionalization::impl::sync(a);
        a_ = at::functionalization::impl::from_functional_tensor(a);
      } else {
        a_ = a;
      }
      
      at::Tensor tau_;
      if (at::functionalization::impl::isFunctionalTensor(tau)) {
        at::functionalization::impl::sync(tau);
        tau_ = at::functionalization::impl::from_functional_tensor(tau);
      } else {
        tau_ = tau;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(a) && at::functionalization::impl::isFunctionalTensor(tau))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::geqrf_a::call(self_, a_, tau_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(a, tau);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::geqrf::call(self_);
        }
        auto a_inner = at::functionalization::impl::from_functional_tensor(a);
        at::functionalization::impl::replace_(a, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(a);
        at::functionalization::impl::sync(a);
        auto a_inner_updated = at::functionalization::impl::from_functional_tensor(a);
        at::functionalization::impl::propagate_xla_data_direct(a_inner, a_inner_updated);
        auto tau_inner = at::functionalization::impl::from_functional_tensor(tau);
        at::functionalization::impl::replace_(tau, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(tau);
        at::functionalization::impl::sync(tau);
        auto tau_inner_updated = at::functionalization::impl::from_functional_tensor(tau);
        at::functionalization::impl::propagate_xla_data_direct(tau_inner, tau_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(a, tau);
      }
    }

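    // For ops with several read-only tensor arguments (here self and input2), the
    // "mutating a non-functional output" check fans out over all of them: it only fires when
    // no argument lives on an XLA device, at least one input is a functional tensor, and the
    // output is not.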
    at::Tensor & orgqr_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & input2, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto input2_meta = to_meta(input2);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::orgqr_out::call(self_meta, input2_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor input2_;
      if (at::functionalization::impl::isFunctionalTensor(input2)) {
        at::functionalization::impl::sync(input2);
        input2_ = at::functionalization::impl::from_functional_tensor(input2);
      } else {
        input2_ = input2;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || input2.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(input2))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::orgqr_out::call(self_, input2_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::orgqr::call(self_, input2_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & ormqr_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto input2_meta = to_meta(input2);
        auto input3_meta = to_meta(input3);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::ormqr_out::call(self_meta, input2_meta, input3_meta, left, transpose, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor input2_;
      if (at::functionalization::impl::isFunctionalTensor(input2)) {
        at::functionalization::impl::sync(input2);
        input2_ = at::functionalization::impl::from_functional_tensor(input2);
      } else {
        input2_ = input2;
      }
      
      at::Tensor input3_;
      if (at::functionalization::impl::isFunctionalTensor(input3)) {
        at::functionalization::impl::sync(input3);
        input3_ = at::functionalization::impl::from_functional_tensor(input3);
      } else {
        input3_ = input3;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || input2.device().type() == c10::DeviceType::XLA || input3.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(input2) || at::functionalization::impl::isFunctionalTensor(input3))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::ormqr_out::call(self_, input2_, input3_, left, transpose, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::ormqr::call(self_, input2_, input3_, left, transpose);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & lu_solve_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto LU_data_meta = to_meta(LU_data);
        auto LU_pivots_meta = to_meta(LU_pivots);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::lu_solve_out::call(self_meta, LU_data_meta, LU_pivots_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor LU_data_;
      if (at::functionalization::impl::isFunctionalTensor(LU_data)) {
        at::functionalization::impl::sync(LU_data);
        LU_data_ = at::functionalization::impl::from_functional_tensor(LU_data);
      } else {
        LU_data_ = LU_data;
      }
      
      at::Tensor LU_pivots_;
      if (at::functionalization::impl::isFunctionalTensor(LU_pivots)) {
        at::functionalization::impl::sync(LU_pivots);
        LU_pivots_ = at::functionalization::impl::from_functional_tensor(LU_pivots);
      } else {
        LU_pivots_ = LU_pivots;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || LU_data.device().type() == c10::DeviceType::XLA || LU_pivots.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(LU_data) || at::functionalization::impl::isFunctionalTensor(LU_pivots))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::lu_solve_out::call(self_, LU_data_, LU_pivots_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::lu_solve::call(self_, LU_data_, LU_pivots_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> lu_unpack_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots, at::Tensor & P, at::Tensor & L, at::Tensor & U) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto LU_data_meta = to_meta(LU_data);
        auto LU_pivots_meta = to_meta(LU_pivots);
        auto P_meta = to_meta(P);
        auto L_meta = to_meta(L);
        auto U_meta = to_meta(U);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::lu_unpack_out::call(LU_data_meta, LU_pivots_meta, unpack_data, unpack_pivots, P_meta, L_meta, U_meta);
      }
      
      at::Tensor LU_data_;
      if (at::functionalization::impl::isFunctionalTensor(LU_data)) {
        at::functionalization::impl::sync(LU_data);
        LU_data_ = at::functionalization::impl::from_functional_tensor(LU_data);
      } else {
        LU_data_ = LU_data;
      }
      
      at::Tensor LU_pivots_;
      if (at::functionalization::impl::isFunctionalTensor(LU_pivots)) {
        at::functionalization::impl::sync(LU_pivots);
        LU_pivots_ = at::functionalization::impl::from_functional_tensor(LU_pivots);
      } else {
        LU_pivots_ = LU_pivots;
      }
      
      at::Tensor P_;
      if (at::functionalization::impl::isFunctionalTensor(P)) {
        at::functionalization::impl::sync(P);
        P_ = at::functionalization::impl::from_functional_tensor(P);
      } else {
        P_ = P;
      }
      
      at::Tensor L_;
      if (at::functionalization::impl::isFunctionalTensor(L)) {
        at::functionalization::impl::sync(L);
        L_ = at::functionalization::impl::from_functional_tensor(L);
      } else {
        L_ = L;
      }
      
      at::Tensor U_;
      if (at::functionalization::impl::isFunctionalTensor(U)) {
        at::functionalization::impl::sync(U);
        U_ = at::functionalization::impl::from_functional_tensor(U);
      } else {
        U_ = U;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(P) && at::functionalization::impl::isFunctionalTensor(L) && at::functionalization::impl::isFunctionalTensor(U))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || LU_data.device().type() == c10::DeviceType::XLA || LU_pivots.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(LU_data) || at::functionalization::impl::isFunctionalTensor(LU_pivots))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::lu_unpack_out::call(LU_data_, LU_pivots_, unpack_data, unpack_pivots, P_, L_, U_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(P, L, U);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::lu_unpack::call(LU_data_, LU_pivots_, unpack_data, unpack_pivots);
        }
        auto P_inner = at::functionalization::impl::from_functional_tensor(P);
        at::functionalization::impl::replace_(P, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(P);
        at::functionalization::impl::sync(P);
        auto P_inner_updated = at::functionalization::impl::from_functional_tensor(P);
        at::functionalization::impl::propagate_xla_data_direct(P_inner, P_inner_updated);
        auto L_inner = at::functionalization::impl::from_functional_tensor(L);
        at::functionalization::impl::replace_(L, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(L);
        at::functionalization::impl::sync(L);
        auto L_inner_updated = at::functionalization::impl::from_functional_tensor(L);
        at::functionalization::impl::propagate_xla_data_direct(L_inner, L_inner_updated);
        auto U_inner = at::functionalization::impl::from_functional_tensor(U);
        at::functionalization::impl::replace_(U, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(U);
        at::functionalization::impl::sync(U);
        auto U_inner_updated = at::functionalization::impl::from_functional_tensor(U);
        at::functionalization::impl::propagate_xla_data_direct(U_inner, U_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(P, L, U);
      }
    }

    at::Tensor & multinomial_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t num_samples, bool replacement, ::std::optional<at::Generator> generator, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::multinomial_out::call(self_meta, num_samples, replacement, generator, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::multinomial_out::call(self_, num_samples, replacement, generator, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::multinomial::call(self_, num_samples, replacement, generator);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & lgamma_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::lgamma_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::lgamma_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::lgamma::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

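    // In-place variants such as lgamma_ / erfinv_ / i0_ / sign_ below are where the
    // meta-tensor reference run is actually enabled (the leading guard is `true` rather than
    // `false`), and `self` serves as both the unwrapped input and the committed output.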
    at::Tensor & lgamma_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::lgamma_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::lgamma_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::lgamma::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & polygamma_out_out(c10::DispatchKeySet dispatchKeySet, int64_t n, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::polygamma_out::call(n, self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::polygamma_out::call(n, self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::polygamma::call(n, self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & erfinv_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::erfinv_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::erfinv_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::erfinv::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & erfinv_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::erfinv_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::erfinv_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::erfinv::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & i0_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::i0_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::i0_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::i0::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & i0_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::i0_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::i0_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::i0::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & sign_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::sign_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::sign_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::sign::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & sign_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::sign_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::sign_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::sign::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & signbit_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::signbit_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::signbit_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::signbit::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & dist_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & p, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::dist_out::call(self_meta, other_meta, p, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::dist_out::call(self_, other_, p, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::dist::call(self_, other_, p);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & atan2_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::atan2_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::atan2_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::atan2::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
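
    // Explanatory note (hand-written, not generated): in-place kernels such as
    // atan2_ follow the same recipe as the out= kernels above, with two
    // differences: the mutated tensor is `self` rather than `out`, and the meta
    // reference run is enabled (the generated flag is `true`) because in-place
    // ops are expected to support meta tensors. When `self` is functional, the
    // functional variant at::_ops::atan2 is called and its result is committed
    // back into `self`, which is then returned by reference.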

    at::Tensor & atan2_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::atan2_::call(self_meta, other_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::atan2_::call(self_, other_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::atan2::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
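
    // Explanatory note (hand-written, not generated): arctan2 is an alias of
    // atan2, so the two kernels below are structurally identical to the atan2
    // kernels above and differ only in which at::_ops entry they redispatch to.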

    at::Tensor & arctan2_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::arctan2_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::arctan2_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::arctan2::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & arctan2_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::arctan2_::call(self_meta, other_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::arctan2_::call(self_, other_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::arctan2::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & histc_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::histc_out::call(self_meta, bins, min, max, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::histc_out::call(self_, bins, min, max, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::histc::call(self_, bins, min, max);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
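
    // Explanatory note (hand-written, not generated): ops with several out=
    // arguments, such as histogram.bins_tensor_out below, apply the write-back
    // sequence once per output: every mutated output (here `hist` and
    // `bin_edges`) must be functional, the functional variant returns a tuple,
    // and each tuple element is committed into its corresponding wrapper with
    // replace_()/commit_update()/sync() before the references are returned.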

    ::std::tuple<at::Tensor &,at::Tensor &> histogram_out_bins_tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & bins, const ::std::optional<at::Tensor> & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto bins_meta = to_meta(bins);
        auto weight_meta = to_meta(weight);
        auto hist_meta = to_meta(hist);
        auto bin_edges_meta = to_meta(bin_edges);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::histogram_bins_tensor_out::call(self_meta, bins_meta, weight_meta, density, hist_meta, bin_edges_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor bins_;
      if (at::functionalization::impl::isFunctionalTensor(bins)) {
        at::functionalization::impl::sync(bins);
        bins_ = at::functionalization::impl::from_functional_tensor(bins);
      } else {
        bins_ = bins;
      }
      
      ::std::optional<at::Tensor> weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      at::Tensor hist_;
      if (at::functionalization::impl::isFunctionalTensor(hist)) {
        at::functionalization::impl::sync(hist);
        hist_ = at::functionalization::impl::from_functional_tensor(hist);
      } else {
        hist_ = hist;
      }
      
      at::Tensor bin_edges_;
      if (at::functionalization::impl::isFunctionalTensor(bin_edges)) {
        at::functionalization::impl::sync(bin_edges);
        bin_edges_ = at::functionalization::impl::from_functional_tensor(bin_edges);
      } else {
        bin_edges_ = bin_edges;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(hist) && at::functionalization::impl::isFunctionalTensor(bin_edges))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || bins.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(bins) || at::functionalization::impl::isFunctionalTensor(weight))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::histogram_bins_tensor_out::call(self_, bins_, weight_, density, hist_, bin_edges_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(hist, bin_edges);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::histogram_bins_tensor::call(self_, bins_, weight_, density);
        }
        auto hist_inner = at::functionalization::impl::from_functional_tensor(hist);
        at::functionalization::impl::replace_(hist, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(hist);
        at::functionalization::impl::sync(hist);
        auto hist_inner_updated = at::functionalization::impl::from_functional_tensor(hist);
        at::functionalization::impl::propagate_xla_data_direct(hist_inner, hist_inner_updated);
        auto bin_edges_inner = at::functionalization::impl::from_functional_tensor(bin_edges);
        at::functionalization::impl::replace_(bin_edges, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(bin_edges);
        at::functionalization::impl::sync(bin_edges);
        auto bin_edges_inner_updated = at::functionalization::impl::from_functional_tensor(bin_edges);
        at::functionalization::impl::propagate_xla_data_direct(bin_edges_inner, bin_edges_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(hist, bin_edges);
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> histogram_out_bin_ct_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto weight_meta = to_meta(weight);
        auto hist_meta = to_meta(hist);
        auto bin_edges_meta = to_meta(bin_edges);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::histogram_bin_ct_out::call(self_meta, bins, range, weight_meta, density, hist_meta, bin_edges_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      ::std::optional<at::Tensor> weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      at::Tensor hist_;
      if (at::functionalization::impl::isFunctionalTensor(hist)) {
        at::functionalization::impl::sync(hist);
        hist_ = at::functionalization::impl::from_functional_tensor(hist);
      } else {
        hist_ = hist;
      }
      
      at::Tensor bin_edges_;
      if (at::functionalization::impl::isFunctionalTensor(bin_edges)) {
        at::functionalization::impl::sync(bin_edges);
        bin_edges_ = at::functionalization::impl::from_functional_tensor(bin_edges);
      } else {
        bin_edges_ = bin_edges;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(hist) && at::functionalization::impl::isFunctionalTensor(bin_edges))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::histogram_bin_ct_out::call(self_, bins, range, weight_, density, hist_, bin_edges_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(hist, bin_edges);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::histogram_bin_ct::call(self_, bins, range, weight_, density);
        }
        auto hist_inner = at::functionalization::impl::from_functional_tensor(hist);
        at::functionalization::impl::replace_(hist, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(hist);
        at::functionalization::impl::sync(hist);
        auto hist_inner_updated = at::functionalization::impl::from_functional_tensor(hist);
        at::functionalization::impl::propagate_xla_data_direct(hist_inner, hist_inner_updated);
        auto bin_edges_inner = at::functionalization::impl::from_functional_tensor(bin_edges);
        at::functionalization::impl::replace_(bin_edges, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(bin_edges);
        at::functionalization::impl::sync(bin_edges);
        auto bin_edges_inner_updated = at::functionalization::impl::from_functional_tensor(bin_edges);
        at::functionalization::impl::propagate_xla_data_direct(bin_edges_inner, bin_edges_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(hist, bin_edges);
      }
    }
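
    // Explanatory note (hand-written, not generated): _histogramdd_bin_edges.out
    // writes into a TensorList, so the kernel below unwraps the list into a
    // ::std::vector<at::Tensor>, calls the functional variant (which returns a
    // vector of fresh tensors), and commits the whole vector back into the
    // functional list in a single replace_()/commit_update()/sync() pass. The op
    // returns void, so there is no reference to hand back to the caller.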

    void _histogramdd_bin_edges_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto weight_meta = to_meta(weight);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_histogramdd_bin_edges_out::call(self_meta, bins, range, weight_meta, density, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      ::std::optional<at::Tensor> weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_histogramdd_bin_edges_out::call(self_, bins, range, weight_, density, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_histogramdd_bin_edges::call(self_, bins, range, weight_, density);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    at::Tensor & _histogramdd_from_bin_cts_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto weight_meta = to_meta(weight);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_histogramdd_from_bin_cts_out::call(self_meta, bins, range, weight_meta, density, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      ::std::optional<at::Tensor> weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_histogramdd_from_bin_cts_out::call(self_, bins, range, weight_, density, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_histogramdd_from_bin_cts::call(self_, bins, range, weight_, density);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _histogramdd_from_bin_tensors_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorList bins, const ::std::optional<at::Tensor> & weight, bool density, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto bins_meta = to_meta(bins);
        auto weight_meta = to_meta(weight);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_histogramdd_from_bin_tensors_out::call(self_meta, bins_meta, weight_meta, density, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      ::std::vector<at::Tensor> bins_;
      if (at::functionalization::impl::isFunctionalTensor(bins)) {
        at::functionalization::impl::sync(bins);
        bins_ = at::functionalization::impl::from_functional_tensor(bins);
      } else {
        bins_ = bins.vec();
      }
      
      ::std::optional<at::Tensor> weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(bins) || at::functionalization::impl::isFunctionalTensor(weight))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_histogramdd_from_bin_tensors_out::call(self_, bins_, weight_, density, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_histogramdd_from_bin_tensors::call(self_, bins_, weight_, density);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & fmod_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::fmod_Scalar_out::call(self_meta, other, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::fmod_Scalar_out::call(self_, other, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::fmod_Scalar::call(self_, other);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
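
    // Explanatory note (hand-written, not generated): fmod_.Scalar mutates `self`
    // and takes no other tensor arguments, so the generated cross-check collapses
    // to `!(false) && (false)`, which can never be true; when `self` is not
    // functional the kernel therefore always falls through to the plain
    // redispatch in case 2.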

    at::Tensor & fmod__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::fmod__Scalar::call(self_meta, other);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::fmod__Scalar::call(self_, other);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::fmod_Scalar::call(self_, other);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & fmod_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::fmod_Tensor_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::fmod_Tensor_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::fmod_Tensor::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & fmod__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::fmod__Tensor::call(self_meta, other_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::fmod__Tensor::call(self_, other_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::fmod_Tensor::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & hypot_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::hypot_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::hypot_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::hypot::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & hypot_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::hypot_::call(self_meta, other_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::hypot_::call(self_, other_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::hypot::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & igamma_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::igamma_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::igamma_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::igamma::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

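    // In-place wrappers such as igamma_ below mirror the out= pattern, but mutate `self`:
    // when `self` is a functional tensor, the functional variant (at::_ops::igamma) is
    // called on the unwrapped inputs and the result is committed back into `self` via
    // replace_ / commit_update / sync. A rough usage sketch (hypothetical tensors):
    //
    //   // under functionalization (e.g. torch.func.functionalize), `self.igamma_(other)`
    //   // is intercepted here and becomes, roughly:
    //   //   tmp = igamma(self_, other_); replace_(self, tmp); commit_update(self);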
    at::Tensor & igamma_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::igamma_::call(self_meta, other_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::igamma_::call(self_, other_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::igamma::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & igammac_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::igammac_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::igammac_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::igammac::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & igammac_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::igammac_::call(self_meta, other_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::igammac_::call(self_, other_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::igammac::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & nextafter_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::nextafter_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::nextafter_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::nextafter::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & nextafter_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::nextafter_::call(self_meta, other_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::nextafter_::call(self_, other_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::nextafter::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & remainder_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::remainder_Scalar_out::call(self_meta, other, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::remainder_Scalar_out::call(self_, other, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::remainder_Scalar::call(self_, other);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

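    // For Scalar overloads such as remainder_.Scalar below, `other` is not a tensor, so
    // there is no functional argument that could mutate a non-functional `self`; the
    // generated "case 1" condition collapses to a constant-false check and the wrapper
    // either redispatches directly or commits the functional result back into `self`.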
    at::Tensor & remainder__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::remainder__Scalar::call(self_meta, other);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::remainder__Scalar::call(self_, other);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::remainder_Scalar::call(self_, other);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & remainder_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::remainder_Tensor_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::remainder_Tensor_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::remainder_Tensor::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & remainder__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::remainder__Tensor::call(self_meta, other_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::remainder__Tensor::call(self_, other_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::remainder_Tensor::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & remainder_out_Scalar_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::remainder_Scalar_Tensor_out::call(self, other_meta, out_meta);
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::remainder_Scalar_Tensor_out::call(self, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::remainder_Scalar_Tensor::call(self, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

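    // The unary out= overloads below (min.unary_out, max.unary_out) have no `other`
    // argument: when `out` is functional they fall back to the plain reductions
    // at::_ops::min / at::_ops::max and commit the resulting tensor into `out`.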
    at::Tensor & min_out_unary_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::min_unary_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::min_unary_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::min::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & fmin_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::fmin_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::fmin_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::fmin::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & max_out_unary_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::max_unary_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::max_unary_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::max::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & fmax_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::fmax_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::fmax_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::fmax::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & maximum_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::maximum_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::maximum_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::maximum::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

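    // Note the overload naming for the binary max/min out= wrappers below: the out=
    // schemas are max.out / min.out, while the corresponding functional variants are
    // registered as at::_ops::max_other / at::_ops::min_other (the plain `max` / `min`
    // names belong to the unary reductions handled above).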
    at::Tensor & max_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::max_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::max_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::max_other::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & minimum_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::minimum_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::minimum_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::minimum::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & min_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::min_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::min_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::min_other::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

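    // Wrappers like quantile.out below carry extra non-tensor arguments (dim, keepdim,
    // interpolation); these are forwarded unchanged to both the meta reference call and
    // the functional variant, and only the tensor arguments are unwrapped and committed.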
    at::Tensor & quantile_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & q, ::std::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto q_meta = to_meta(q);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::quantile_out::call(self_meta, q_meta, dim, keepdim, interpolation, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor q_;
      if (at::functionalization::impl::isFunctionalTensor(q)) {
        at::functionalization::impl::sync(q);
        q_ = at::functionalization::impl::from_functional_tensor(q);
      } else {
        q_ = q;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || q.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(q))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::quantile_out::call(self_, q_, dim, keepdim, interpolation, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::quantile::call(self_, q_, dim, keepdim, interpolation);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
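
    // Only tensor arguments are synced and unwrapped above; non-tensor arguments such as
    // dim, keepdim and interpolation (and the double q in the Scalar overload below) are
    // forwarded unchanged to both the out= op and its functional variant.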

    at::Tensor & quantile_out_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double q, ::std::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::quantile_scalar_out::call(self_meta, q, dim, keepdim, interpolation, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::quantile_scalar_out::call(self_, q, dim, keepdim, interpolation, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::quantile_scalar::call(self_, q, dim, keepdim, interpolation);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & nanquantile_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & q, ::std::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto q_meta = to_meta(q);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::nanquantile_out::call(self_meta, q_meta, dim, keepdim, interpolation, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor q_;
      if (at::functionalization::impl::isFunctionalTensor(q)) {
        at::functionalization::impl::sync(q);
        q_ = at::functionalization::impl::from_functional_tensor(q);
      } else {
        q_ = q;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || q.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(q))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::nanquantile_out::call(self_, q_, dim, keepdim, interpolation, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::nanquantile::call(self_, q_, dim, keepdim, interpolation);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & nanquantile_out_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double q, ::std::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::nanquantile_scalar_out::call(self_meta, q, dim, keepdim, interpolation, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::nanquantile_scalar_out::call(self_, q, dim, keepdim, interpolation, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::nanquantile_scalar::call(self_, q, dim, keepdim, interpolation);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> sort_out_values(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool descending, at::Tensor & values, at::Tensor & indices) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto values_meta = to_meta(values);
        auto indices_meta = to_meta(indices);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::sort_values::call(self_meta, dim, descending, values_meta, indices_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor values_;
      if (at::functionalization::impl::isFunctionalTensor(values)) {
        at::functionalization::impl::sync(values);
        values_ = at::functionalization::impl::from_functional_tensor(values);
      } else {
        values_ = values;
      }
      
      at::Tensor indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(values) && at::functionalization::impl::isFunctionalTensor(indices))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::sort_values::call(self_, dim, descending, values_, indices_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::sort::call(self_, dim, descending);
        }
        auto values_inner = at::functionalization::impl::from_functional_tensor(values);
        at::functionalization::impl::replace_(values, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(values);
        at::functionalization::impl::sync(values);
        auto values_inner_updated = at::functionalization::impl::from_functional_tensor(values);
        at::functionalization::impl::propagate_xla_data_direct(values_inner, values_inner_updated);
        auto indices_inner = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::replace_(indices, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(indices);
        at::functionalization::impl::sync(indices);
        auto indices_inner_updated = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::propagate_xla_data_direct(indices_inner, indices_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
      }
    }
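
    // Ops with multiple mutated outputs, such as the sort and topk kernels in this section,
    // call a functional variant that returns a tuple; each output then gets its own
    // replace_()/commit_update()/sync() sequence before the tuple of references is returned.
    // The functional-tensor check requires all outputs to be functional, so mixing
    // functional and non-functional outputs falls into the error/redispatch branch above.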

    ::std::tuple<at::Tensor &,at::Tensor &> sort_out_values_stable(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<bool> stable, int64_t dim, bool descending, at::Tensor & values, at::Tensor & indices) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto values_meta = to_meta(values);
        auto indices_meta = to_meta(indices);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::sort_values_stable::call(self_meta, stable, dim, descending, values_meta, indices_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor values_;
      if (at::functionalization::impl::isFunctionalTensor(values)) {
        at::functionalization::impl::sync(values);
        values_ = at::functionalization::impl::from_functional_tensor(values);
      } else {
        values_ = values;
      }
      
      at::Tensor indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(values) && at::functionalization::impl::isFunctionalTensor(indices))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::sort_values_stable::call(self_, stable, dim, descending, values_, indices_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::sort_stable::call(self_, stable, dim, descending);
        }
        auto values_inner = at::functionalization::impl::from_functional_tensor(values);
        at::functionalization::impl::replace_(values, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(values);
        at::functionalization::impl::sync(values);
        auto values_inner_updated = at::functionalization::impl::from_functional_tensor(values);
        at::functionalization::impl::propagate_xla_data_direct(values_inner, values_inner_updated);
        auto indices_inner = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::replace_(indices, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(indices);
        at::functionalization::impl::sync(indices);
        auto indices_inner_updated = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::propagate_xla_data_direct(indices_inner, indices_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> sort_out_dimname_values(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool descending, at::Tensor & values, at::Tensor & indices) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto values_meta = to_meta(values);
        auto indices_meta = to_meta(indices);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::sort_dimname_values::call(self_meta, dim, descending, values_meta, indices_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor values_;
      if (at::functionalization::impl::isFunctionalTensor(values)) {
        at::functionalization::impl::sync(values);
        values_ = at::functionalization::impl::from_functional_tensor(values);
      } else {
        values_ = values;
      }
      
      at::Tensor indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(values) && at::functionalization::impl::isFunctionalTensor(indices))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::sort_dimname_values::call(self_, dim, descending, values_, indices_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::sort_dimname::call(self_, dim, descending);
        }
        auto values_inner = at::functionalization::impl::from_functional_tensor(values);
        at::functionalization::impl::replace_(values, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(values);
        at::functionalization::impl::sync(values);
        auto values_inner_updated = at::functionalization::impl::from_functional_tensor(values);
        at::functionalization::impl::propagate_xla_data_direct(values_inner, values_inner_updated);
        auto indices_inner = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::replace_(indices, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(indices);
        at::functionalization::impl::sync(indices);
        auto indices_inner_updated = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::propagate_xla_data_direct(indices_inner, indices_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> sort_out_dimname_values_stable(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<bool> stable, at::Dimname dim, bool descending, at::Tensor & values, at::Tensor & indices) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto values_meta = to_meta(values);
        auto indices_meta = to_meta(indices);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::sort_dimname_values_stable::call(self_meta, stable, dim, descending, values_meta, indices_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor values_;
      if (at::functionalization::impl::isFunctionalTensor(values)) {
        at::functionalization::impl::sync(values);
        values_ = at::functionalization::impl::from_functional_tensor(values);
      } else {
        values_ = values;
      }
      
      at::Tensor indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(values) && at::functionalization::impl::isFunctionalTensor(indices))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::sort_dimname_values_stable::call(self_, stable, dim, descending, values_, indices_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::sort_dimname_stable::call(self_, stable, dim, descending);
        }
        auto values_inner = at::functionalization::impl::from_functional_tensor(values);
        at::functionalization::impl::replace_(values, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(values);
        at::functionalization::impl::sync(values);
        auto values_inner_updated = at::functionalization::impl::from_functional_tensor(values);
        at::functionalization::impl::propagate_xla_data_direct(values_inner, values_inner_updated);
        auto indices_inner = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::replace_(indices, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(indices);
        at::functionalization::impl::sync(indices);
        auto indices_inner_updated = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::propagate_xla_data_direct(indices_inner, indices_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
      }
    }

    at::Tensor & msort_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::msort_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::msort_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::msort::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & argsort_out_stable_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool stable, int64_t dim, bool descending, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::argsort_stable_out::call(self_meta, stable, dim, descending, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::argsort_stable_out::call(self_, stable, dim, descending, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::argsort_stable::call(self_, stable, dim, descending);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> topk_out_values(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt k, int64_t dim, bool largest, bool sorted, at::Tensor & values, at::Tensor & indices) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto values_meta = to_meta(values);
        auto indices_meta = to_meta(indices);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::topk_values::call(self_meta, k, dim, largest, sorted, values_meta, indices_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor values_;
      if (at::functionalization::impl::isFunctionalTensor(values)) {
        at::functionalization::impl::sync(values);
        values_ = at::functionalization::impl::from_functional_tensor(values);
      } else {
        values_ = values;
      }
      
      at::Tensor indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(values) && at::functionalization::impl::isFunctionalTensor(indices))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::topk_values::call(self_, k, dim, largest, sorted, values_, indices_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::topk::call(self_, k, dim, largest, sorted);
        }
        auto values_inner = at::functionalization::impl::from_functional_tensor(values);
        at::functionalization::impl::replace_(values, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(values);
        at::functionalization::impl::sync(values);
        auto values_inner_updated = at::functionalization::impl::from_functional_tensor(values);
        at::functionalization::impl::propagate_xla_data_direct(values_inner, values_inner_updated);
        auto indices_inner = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::replace_(indices, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(indices);
        at::functionalization::impl::sync(indices);
        auto indices_inner_updated = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::propagate_xla_data_direct(indices_inner, indices_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
      }
    }

    at::Tensor & all_out_all_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::all_all_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::all_all_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::all::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & any_out_all_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::any_all_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::any_all_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::any::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & renorm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::renorm_out::call(self_meta, p, dim, maxnorm, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::renorm_out::call(self_, p, dim, maxnorm, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::renorm::call(self_, p, dim, maxnorm);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & renorm_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::renorm_::call(self_meta, p, dim, maxnorm);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::renorm_::call(self_, p, dim, maxnorm);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::renorm::call(self_, p, dim, maxnorm);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
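
    // In-place kernels such as renorm_ differ from the out= kernels above in two ways:
    // the meta-tensor reference run is enabled (its guard is `true && !disable_meta_reference()`),
    // so shape and dtype errors surface before the op is rewritten, and the mutated tensor
    // is `self` itself, which is replaced with the result of the functional at::_ops::renorm
    // call and committed back to its wrapper.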

    at::Tensor & unfold_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_in_meta = to_meta(grad_in);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::unfold_backward_out::call(grad_in_meta, input_sizes, dim, size, step, out_meta);
      }
      
      at::Tensor grad_in_;
      if (at::functionalization::impl::isFunctionalTensor(grad_in)) {
        at::functionalization::impl::sync(grad_in);
        grad_in_ = at::functionalization::impl::from_functional_tensor(grad_in);
      } else {
        grad_in_ = grad_in;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_in.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_in))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::unfold_backward_out::call(grad_in_, input_sizes, dim, size, step, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::unfold_backward::call(grad_in_, input_sizes, dim, size, step);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & pow_out_Tensor_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto exponent_meta = to_meta(exponent);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::pow_Tensor_Tensor_out::call(self_meta, exponent_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor exponent_;
      if (at::functionalization::impl::isFunctionalTensor(exponent)) {
        at::functionalization::impl::sync(exponent);
        exponent_ = at::functionalization::impl::from_functional_tensor(exponent);
      } else {
        exponent_ = exponent;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || exponent.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(exponent))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::pow_Tensor_Tensor_out::call(self_, exponent_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::pow_Tensor_Tensor::call(self_, exponent_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & pow__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & exponent) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto exponent_meta = to_meta(exponent);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::pow__Tensor::call(self_meta, exponent_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor exponent_;
      if (at::functionalization::impl::isFunctionalTensor(exponent)) {
        at::functionalization::impl::sync(exponent);
        exponent_ = at::functionalization::impl::from_functional_tensor(exponent);
      } else {
        exponent_ = exponent;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || exponent.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(exponent))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::pow__Tensor::call(self_, exponent_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::pow_Tensor_Tensor::call(self_, exponent_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
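
    // For the Scalar-base overload below (pow.Scalar_out), `self` is a c10::Scalar and is
    // forwarded as-is; only `exponent` and `out` get the sync/unwrap treatment. As with the
    // other out= kernels here (and unlike the in-place ones), the meta-tensor replay block
    // is compiled out by its constant `false &&` guard.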

    at::Tensor & pow_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto exponent_meta = to_meta(exponent);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::pow_Scalar_out::call(self, exponent_meta, out_meta);
      }
      
      at::Tensor exponent_;
      if (at::functionalization::impl::isFunctionalTensor(exponent)) {
        at::functionalization::impl::sync(exponent);
        exponent_ = at::functionalization::impl::from_functional_tensor(exponent);
      } else {
        exponent_ = exponent;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || exponent.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(exponent))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::pow_Scalar_out::call(self, exponent_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::pow_Scalar::call(self, exponent_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & pow_out_Tensor_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::pow_Tensor_Scalar_out::call(self_meta, exponent, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::pow_Tensor_Scalar_out::call(self_, exponent, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::pow_Tensor_Scalar::call(self_, exponent);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & pow__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & exponent) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::pow__Scalar::call(self_meta, exponent);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::pow__Scalar::call(self_, exponent);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::pow_Tensor_Scalar::call(self_, exponent);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
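
    // The float_power kernels below are instantiations of the same template as the pow
    // kernels above, just targeting the float_power.* functional overloads (which promote
    // to double / complex double before computing).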

    at::Tensor & float_power_out_Tensor_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto exponent_meta = to_meta(exponent);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::float_power_Tensor_Tensor_out::call(self_meta, exponent_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor exponent_;
      if (at::functionalization::impl::isFunctionalTensor(exponent)) {
        at::functionalization::impl::sync(exponent);
        exponent_ = at::functionalization::impl::from_functional_tensor(exponent);
      } else {
        exponent_ = exponent;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || exponent.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(exponent))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::float_power_Tensor_Tensor_out::call(self_, exponent_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::float_power_Tensor_Tensor::call(self_, exponent_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & float_power__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & exponent) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto exponent_meta = to_meta(exponent);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::float_power__Tensor::call(self_meta, exponent_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor exponent_;
      if (at::functionalization::impl::isFunctionalTensor(exponent)) {
        at::functionalization::impl::sync(exponent);
        exponent_ = at::functionalization::impl::from_functional_tensor(exponent);
      } else {
        exponent_ = exponent;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || exponent.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(exponent))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::float_power__Tensor::call(self_, exponent_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::float_power_Tensor_Tensor::call(self_, exponent_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & float_power_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto exponent_meta = to_meta(exponent);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::float_power_Scalar_out::call(self, exponent_meta, out_meta);
      }
      
      at::Tensor exponent_;
      if (at::functionalization::impl::isFunctionalTensor(exponent)) {
        at::functionalization::impl::sync(exponent);
        exponent_ = at::functionalization::impl::from_functional_tensor(exponent);
      } else {
        exponent_ = exponent;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || exponent.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(exponent))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::float_power_Scalar_out::call(self, exponent_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::float_power_Scalar::call(self, exponent_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & float_power_out_Tensor_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::float_power_Tensor_Scalar_out::call(self_meta, exponent, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::float_power_Tensor_Scalar_out::call(self_, exponent, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::float_power_Tensor_Scalar::call(self_, exponent);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & float_power__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & exponent) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::float_power__Scalar::call(self_meta, exponent);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::float_power__Scalar::call(self_, exponent);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::float_power_Tensor_Scalar::call(self_, exponent);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
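
    // The normal kernels below cover the random-sampling overloads. Each one threads the
    // optional Generator through to its functional counterpart (normal_functional,
    // normal.Tensor_float, normal.float_Tensor, normal.Tensor_Tensor), so sampling under
    // functionalization consumes RNG state the same way the mutable ops do.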

    at::Tensor & normal_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double mean, double std, ::std::optional<at::Generator> generator, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::normal_out::call(self_meta, mean, std, generator, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::normal_out::call(self_, mean, std, generator, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::normal_functional::call(self_, mean, std, generator);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & normal_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double mean, double std, ::std::optional<at::Generator> generator) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::normal_::call(self_meta, mean, std, generator);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::normal_::call(self_, mean, std, generator);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::normal_functional::call(self_, mean, std, generator);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & normal_out_Tensor_float_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & mean, double std, ::std::optional<at::Generator> generator, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto mean_meta = to_meta(mean);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::normal_Tensor_float_out::call(mean_meta, std, generator, out_meta);
      }
      
      at::Tensor mean_;
      if (at::functionalization::impl::isFunctionalTensor(mean)) {
        at::functionalization::impl::sync(mean);
        mean_ = at::functionalization::impl::from_functional_tensor(mean);
      } else {
        mean_ = mean;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || mean.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(mean))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::normal_Tensor_float_out::call(mean_, std, generator, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::normal_Tensor_float::call(mean_, std, generator);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & normal_out_float_Tensor_out(c10::DispatchKeySet dispatchKeySet, double mean, const at::Tensor & std, ::std::optional<at::Generator> generator, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto std_meta = to_meta(std);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::normal_float_Tensor_out::call(mean, std_meta, generator, out_meta);
      }
      
      at::Tensor std_;
      if (at::functionalization::impl::isFunctionalTensor(std)) {
        at::functionalization::impl::sync(std);
        std_ = at::functionalization::impl::from_functional_tensor(std);
      } else {
        std_ = std;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || std.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(std))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::normal_float_Tensor_out::call(mean, std_, generator, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::normal_float_Tensor::call(mean, std_, generator);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & normal_out_Tensor_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & mean, const at::Tensor & std, ::std::optional<at::Generator> generator, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto mean_meta = to_meta(mean);
        auto std_meta = to_meta(std);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::normal_Tensor_Tensor_out::call(mean_meta, std_meta, generator, out_meta);
      }
      
      at::Tensor mean_;
      if (at::functionalization::impl::isFunctionalTensor(mean)) {
        at::functionalization::impl::sync(mean);
        mean_ = at::functionalization::impl::from_functional_tensor(mean);
      } else {
        mean_ = mean;
      }
      
      at::Tensor std_;
      if (at::functionalization::impl::isFunctionalTensor(std)) {
        at::functionalization::impl::sync(std);
        std_ = at::functionalization::impl::from_functional_tensor(std);
      } else {
        std_ = std;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || mean.device().type() == c10::DeviceType::XLA || std.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(mean) || at::functionalization::impl::isFunctionalTensor(std))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::normal_Tensor_Tensor_out::call(mean_, std_, generator, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::normal_Tensor_Tensor::call(mean_, std_, generator);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
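
    // normal.float_float_out has no Tensor inputs besides `out`, so its functional
    // counterpart is a factory op: the kernel below rebuilds dtype, layout, and device from
    // the unwrapped `out_` when calling normal.float_float, and passes ::std::nullopt for
    // the trailing pin_memory argument.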

    at::Tensor & normal_out_float_float_out(c10::DispatchKeySet dispatchKeySet, double mean, double std, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::normal_float_float_out::call(mean, std, size, generator, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::normal_float_float_out::call(mean, std, size, generator, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::normal_float_float::call(mean, std, size, generator, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
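
    // The _amp_foreach kernel below operates on TensorList arguments: functional lists are
    // unwrapped into ::std::vector<at::Tensor> (or copied with .vec() when already plain).
    // Its functional variant returns a (Tensor[], Tensor) tuple, and each mutated argument
    // is committed back from its slot of tmp_output using the same
    // replace_/commit_update/sync sequence as the single-tensor kernels.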

    void _amp_foreach_non_finite_check_and_unscale_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto found_inf_meta = to_meta(found_inf);
        auto inv_scale_meta = to_meta(inv_scale);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_amp_foreach_non_finite_check_and_unscale_out::call(self_meta, found_inf_meta, inv_scale_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      at::Tensor found_inf_;
      if (at::functionalization::impl::isFunctionalTensor(found_inf)) {
        at::functionalization::impl::sync(found_inf);
        found_inf_ = at::functionalization::impl::from_functional_tensor(found_inf);
      } else {
        found_inf_ = found_inf;
      }
      
      at::Tensor inv_scale_;
      if (at::functionalization::impl::isFunctionalTensor(inv_scale)) {
        at::functionalization::impl::sync(inv_scale);
        inv_scale_ = at::functionalization::impl::from_functional_tensor(inv_scale);
      } else {
        inv_scale_ = inv_scale;
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(found_inf) && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || inv_scale.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(inv_scale))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_amp_foreach_non_finite_check_and_unscale_out::call(self_, found_inf_, inv_scale_, out_);
         
        }
      } else {
        ::std::tuple<::std::vector<at::Tensor>,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_amp_foreach_non_finite_check_and_unscale::call(self_, found_inf_, inv_scale_);
        }
        auto found_inf_inner = at::functionalization::impl::from_functional_tensor(found_inf);
        at::functionalization::impl::replace_(found_inf, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(found_inf);
        at::functionalization::impl::sync(found_inf);
        auto found_inf_inner_updated = at::functionalization::impl::from_functional_tensor(found_inf);
        at::functionalization::impl::propagate_xla_data_direct(found_inf_inner, found_inf_inner_updated);
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
    
      }
    }

    void _amp_foreach_non_finite_check_and_unscale_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto found_inf_meta = to_meta(found_inf);
        auto inv_scale_meta = to_meta(inv_scale);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_amp_foreach_non_finite_check_and_unscale_::call(self_meta, found_inf_meta, inv_scale_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      at::Tensor found_inf_;
      if (at::functionalization::impl::isFunctionalTensor(found_inf)) {
        at::functionalization::impl::sync(found_inf);
        found_inf_ = at::functionalization::impl::from_functional_tensor(found_inf);
      } else {
        found_inf_ = found_inf;
      }
      
      at::Tensor inv_scale_;
      if (at::functionalization::impl::isFunctionalTensor(inv_scale)) {
        at::functionalization::impl::sync(inv_scale);
        inv_scale_ = at::functionalization::impl::from_functional_tensor(inv_scale);
      } else {
        inv_scale_ = inv_scale;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self) && at::functionalization::impl::isFunctionalTensor(found_inf))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || inv_scale.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(inv_scale))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_amp_foreach_non_finite_check_and_unscale_::call(self_, found_inf_, inv_scale_);
         
        }
      } else {
        ::std::tuple<::std::vector<at::Tensor>,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_amp_foreach_non_finite_check_and_unscale::call(self_, found_inf_, inv_scale_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        auto found_inf_inner = at::functionalization::impl::from_functional_tensor(found_inf);
        at::functionalization::impl::replace_(found_inf, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(found_inf);
        at::functionalization::impl::sync(found_inf);
        auto found_inf_inner_updated = at::functionalization::impl::from_functional_tensor(found_inf);
        at::functionalization::impl::propagate_xla_data_direct(found_inf_inner, found_inf_inner_updated);
    
      }
    }
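
    // _amp_update_scale mutates two tensors. In the out= kernel below, growth_tracker is
    // committed from std::get<0>(tmp_output) and out from std::get<1>(tmp_output); the
    // in-place kernel that follows commits self from std::get<0>(tmp_output) and
    // growth_tracker from std::get<1>(tmp_output).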

    at::Tensor & _amp_update_scale_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto growth_tracker_meta = to_meta(growth_tracker);
        auto found_inf_meta = to_meta(found_inf);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_amp_update_scale_out::call(self_meta, growth_tracker_meta, found_inf_meta, scale_growth_factor, scale_backoff_factor, growth_interval, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor growth_tracker_;
      if (at::functionalization::impl::isFunctionalTensor(growth_tracker)) {
        at::functionalization::impl::sync(growth_tracker);
        growth_tracker_ = at::functionalization::impl::from_functional_tensor(growth_tracker);
      } else {
        growth_tracker_ = growth_tracker;
      }
      
      at::Tensor found_inf_;
      if (at::functionalization::impl::isFunctionalTensor(found_inf)) {
        at::functionalization::impl::sync(found_inf);
        found_inf_ = at::functionalization::impl::from_functional_tensor(found_inf);
      } else {
        found_inf_ = found_inf;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(growth_tracker) && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || found_inf.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(found_inf))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_amp_update_scale_out::call(self_, growth_tracker_, found_inf_, scale_growth_factor, scale_backoff_factor, growth_interval, out_);
         return out;
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_amp_update_scale::call(self_, growth_tracker_, found_inf_, scale_growth_factor, scale_backoff_factor, growth_interval);
        }
        auto growth_tracker_inner = at::functionalization::impl::from_functional_tensor(growth_tracker);
        at::functionalization::impl::replace_(growth_tracker, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(growth_tracker);
        at::functionalization::impl::sync(growth_tracker);
        auto growth_tracker_inner_updated = at::functionalization::impl::from_functional_tensor(growth_tracker);
        at::functionalization::impl::propagate_xla_data_direct(growth_tracker_inner, growth_tracker_inner_updated);
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

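    // The tail of the branch above shows the commit pattern shared by every kernel in this
    // section: run the functional variant of the op, then for each mutated argument call
    // replace_() with the new value, commit_update() and sync() to publish the mutation, and
    // propagate_xla_data_direct() to carry backend-specific data from the previous inner
    // tensor over to the updated one.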
    at::Tensor & _amp_update_scale_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto growth_tracker_meta = to_meta(growth_tracker);
        auto found_inf_meta = to_meta(found_inf);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_amp_update_scale_::call(self_meta, growth_tracker_meta, found_inf_meta, scale_growth_factor, scale_backoff_factor, growth_interval);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor growth_tracker_;
      if (at::functionalization::impl::isFunctionalTensor(growth_tracker)) {
        at::functionalization::impl::sync(growth_tracker);
        growth_tracker_ = at::functionalization::impl::from_functional_tensor(growth_tracker);
      } else {
        growth_tracker_ = growth_tracker;
      }
      
      at::Tensor found_inf_;
      if (at::functionalization::impl::isFunctionalTensor(found_inf)) {
        at::functionalization::impl::sync(found_inf);
        found_inf_ = at::functionalization::impl::from_functional_tensor(found_inf);
      } else {
        found_inf_ = found_inf;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self) && at::functionalization::impl::isFunctionalTensor(growth_tracker))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || found_inf.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(found_inf))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_amp_update_scale_::call(self_, growth_tracker_, found_inf_, scale_growth_factor, scale_backoff_factor, growth_interval);
         return self;
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_amp_update_scale::call(self_, growth_tracker_, found_inf_, scale_growth_factor, scale_backoff_factor, growth_interval);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        auto growth_tracker_inner = at::functionalization::impl::from_functional_tensor(growth_tracker);
        at::functionalization::impl::replace_(growth_tracker, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(growth_tracker);
        at::functionalization::impl::sync(growth_tracker);
        auto growth_tracker_inner_updated = at::functionalization::impl::from_functional_tensor(growth_tracker);
        at::functionalization::impl::propagate_xla_data_direct(growth_tracker_inner, growth_tracker_inner_updated);
        return self;
      }
    }

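    // Note on the constant guards below: the meta-tensor reference block runs under
    // "if (true && !disable_meta_reference())" for inplace kernels but "if (false && ...)" for
    // the out= overloads, since (per the generated comment) the reference pass is only done
    // for true inplace ops today.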
    void _foreach_add_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_add_Scalar_out::call(self_meta, scalar, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_add_Scalar_out::call(self_, scalar, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_add_Scalar::call(self_, scalar);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

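    // As in the kernel above, the _foreach overloads unwrap TensorList arguments into a
    // ::std::vector<at::Tensor> (or fall back to .vec() for non-functional lists), call the
    // functional variant to get a vector of results, and commit the whole vector back into the
    // mutated list with a single replace_() call.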
    void _foreach_add__Scalar(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_add__Scalar::call(self_meta, scalar);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_add__Scalar::call(self_, scalar);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_add_Scalar::call(self_, scalar);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_add_out_List_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_add_List_out::call(self_meta, other_meta, alpha, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_add_List_out::call(self_, other_, alpha, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_add_List::call(self_, other_, alpha);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_add__List(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_add__List::call(self_meta, other_meta, alpha);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_add__List::call(self_, other_, alpha);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_add_List::call(self_, other_, alpha);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_add_out_ScalarList_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_add_ScalarList_out::call(self_meta, scalars, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_add_ScalarList_out::call(self_, scalars, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_add_ScalarList::call(self_, scalars);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_add__ScalarList(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_add__ScalarList::call(self_meta, scalars);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_add__ScalarList::call(self_, scalars);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_add_ScalarList::call(self_, scalars);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_add_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Tensor & other, const at::Scalar & alpha, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_add_Tensor_out::call(self_meta, other_meta, alpha, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_add_Tensor_out::call(self_, other_, alpha, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_add_Tensor::call(self_, other_, alpha);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

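    // For the Tensor-valued "other" overloads above, the functional/non-functional mixing check
    // is skipped when "other" lives on an XLA device (see the cpu_tensor.copy_(xla_tensor)
    // comment), so only genuinely mismatched functional and non-functional arguments hit the
    // assert.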
    void _foreach_add__Tensor(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Tensor & other, const at::Scalar & alpha) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_add__Tensor::call(self_meta, other_meta, alpha);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_add__Tensor::call(self_, other_, alpha);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_add_Tensor::call(self_, other_, alpha);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_sub_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_sub_Scalar_out::call(self_meta, scalar, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_sub_Scalar_out::call(self_, scalar, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_sub_Scalar::call(self_, scalar);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_sub__Scalar(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_sub__Scalar::call(self_meta, scalar);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_sub__Scalar::call(self_, scalar);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_sub_Scalar::call(self_, scalar);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_sub_out_List_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_sub_List_out::call(self_meta, other_meta, alpha, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_sub_List_out::call(self_, other_, alpha, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_sub_List::call(self_, other_, alpha);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_sub__List(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_sub__List::call(self_meta, other_meta, alpha);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_sub__List::call(self_, other_, alpha);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_sub_List::call(self_, other_, alpha);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_sub_out_ScalarList_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_sub_ScalarList_out::call(self_meta, scalars, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_sub_ScalarList_out::call(self_, scalars, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_sub_ScalarList::call(self_, scalars);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_sub__ScalarList(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_sub__ScalarList::call(self_meta, scalars);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_sub__ScalarList::call(self_, scalars);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_sub_ScalarList::call(self_, scalars);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_mul_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_mul_Scalar_out::call(self_meta, scalar, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_mul_Scalar_out::call(self_, scalar, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_mul_Scalar::call(self_, scalar);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_mul__Scalar(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_mul__Scalar::call(self_meta, scalar);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_mul__Scalar::call(self_, scalar);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_mul_Scalar::call(self_, scalar);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_mul_out_List_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_mul_List_out::call(self_meta, other_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_mul_List_out::call(self_, other_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_mul_List::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_mul__List(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_mul__List::call(self_meta, other_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_mul__List::call(self_, other_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_mul_List::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_mul_out_ScalarList_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_mul_ScalarList_out::call(self_meta, scalars, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_mul_ScalarList_out::call(self_, scalars, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_mul_ScalarList::call(self_, scalars);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }
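
    // Note that for out= overloads like the one above, the kernel does not call the _out op
    // when `out` is functional: it invokes the purely functional overload
    // (at::_ops::_foreach_mul_ScalarList) and replays the result into `out` via replace_()
    // and commit_update(). Only the non-functional fallback path redispatches to the
    // original _out op.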

    void _foreach_mul__ScalarList(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_mul__ScalarList::call(self_meta, scalars);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_mul__ScalarList::call(self_, scalars);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_mul_ScalarList::call(self_, scalars);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }
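
    // The `if (true && !disable_meta_reference())` prologue above appears on the in-place
    // variants in this file: the op is first run on meta tensors so that shape errors
    // specific to the in-place form surface eagerly, before functionalization rewrites the
    // call. Roughly:
    //
    //   auto self_meta = to_meta(self);                                 // meta copies carry shapes only
    //   at::_ops::_foreach_mul__ScalarList::call(self_meta, scalars);   // shape-checks, no real compute
    //
    // The out= kernels are generated with `if (false && ...)`, so the meta reference run is
    // compiled out for them.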

    void _foreach_mul_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Tensor & other, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_mul_Tensor_out::call(self_meta, other_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_mul_Tensor_out::call(self_, other_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_mul_Tensor::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_mul__Tensor(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Tensor & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_mul__Tensor::call(self_meta, other_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_mul__Tensor::call(self_, other_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_mul_Tensor::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }
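
    // The extra `other.device().type() == c10::DeviceType::XLA` term in the check above is
    // the carve-out referred to by the comments: mutating a non-functional tensor with an
    // XLA-backed argument (e.g. cpu_tensor.copy_(xla_tensor)) is legal, so in that case the
    // hard error is skipped and the kernel simply redispatches to the original in-place op.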

    void _foreach_div_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_div_Scalar_out::call(self_meta, scalar, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_div_Scalar_out::call(self_, scalar, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_div_Scalar::call(self_, scalar);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_div__Scalar(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_div__Scalar::call(self_meta, scalar);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_div__Scalar::call(self_, scalar);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_div_Scalar::call(self_, scalar);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_div_out_List_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_div_List_out::call(self_meta, other_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_div_List_out::call(self_, other_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_div_List::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_div__List(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_div__List::call(self_meta, other_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_div__List::call(self_, other_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_div_List::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_div_out_ScalarList_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_div_ScalarList_out::call(self_meta, scalars, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_div_ScalarList_out::call(self_, scalars, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_div_ScalarList::call(self_, scalars);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_div__ScalarList(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_div__ScalarList::call(self_meta, scalars);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_div__ScalarList::call(self_, scalars);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_div_ScalarList::call(self_, scalars);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_div_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Tensor & other, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_div_Tensor_out::call(self_meta, other_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_div_Tensor_out::call(self_, other_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_div_Tensor::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_div__Tensor(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Tensor & other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_div__Tensor::call(self_meta, other_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_div__Tensor::call(self_, other_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_div_Tensor::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_clamp_max_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_clamp_max_Scalar_out::call(self_meta, scalar, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_clamp_max_Scalar_out::call(self_, scalar, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_clamp_max_Scalar::call(self_, scalar);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_clamp_max__Scalar(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_clamp_max__Scalar::call(self_meta, scalar);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_clamp_max__Scalar::call(self_, scalar);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_clamp_max_Scalar::call(self_, scalar);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_clamp_max_out_List_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_clamp_max_List_out::call(self_meta, other_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_clamp_max_List_out::call(self_, other_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_clamp_max_List::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_clamp_max__List(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_clamp_max__List::call(self_meta, other_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_clamp_max__List::call(self_, other_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_clamp_max_List::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_clamp_max_out_ScalarList_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_clamp_max_ScalarList_out::call(self_meta, scalars, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_clamp_max_ScalarList_out::call(self_, scalars, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_clamp_max_ScalarList::call(self_, scalars);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_clamp_max__ScalarList(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_clamp_max__ScalarList::call(self_meta, scalars);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_clamp_max__ScalarList::call(self_, scalars);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_clamp_max_ScalarList::call(self_, scalars);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_clamp_min_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_clamp_min_Scalar_out::call(self_meta, scalar, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_clamp_min_Scalar_out::call(self_, scalar, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_clamp_min_Scalar::call(self_, scalar);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_clamp_min__Scalar(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_clamp_min__Scalar::call(self_meta, scalar);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_clamp_min__Scalar::call(self_, scalar);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_clamp_min_Scalar::call(self_, scalar);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }
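
    // Note [Functionalization pattern for in-place foreach ops] (illustrative sketch, not generated by torchgen)
    // Every in-place kernel in this section follows the same shape as _foreach_clamp_min__Scalar above:
    // unwrap the functional wrappers, run the *functional* (non-mutating) variant of the op with the
    // Functionalize key skipped, and then commit the result back into the wrappers. A hand-written sketch
    // of that pattern for a hypothetical unary list op `my_op_` / `my_op` (placeholder names, not real
    // ATen operators) would look roughly like:
    //
    //   void my_op_(c10::DispatchKeySet ks, at::TensorList self) {
    //     at::functionalization::impl::sync(self);
    //     auto self_ = at::functionalization::impl::from_functional_tensor(self);
    //     ::std::vector<at::Tensor> tmp;
    //     {
    //       at::AutoDispatchSkipFunctionalize guard;   // redispatch below the Functionalize key
    //       tmp = at::_ops::my_op::call(self_);        // functional variant of the op
    //     }
    //     at::functionalization::impl::replace_(self, tmp);   // record the mutation on the wrappers
    //     at::functionalization::impl::commit_update(self);
    //     at::functionalization::impl::sync(self);
    //   }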

    void _foreach_clamp_min_out_List_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_clamp_min_List_out::call(self_meta, other_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_clamp_min_List_out::call(self_, other_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_clamp_min_List::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }
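
    // Note [Functionalization of out= foreach variants] (illustrative, not generated by torchgen)
    // The *_out kernels above and below differ from the in-place ones only in which argument carries
    // the mutation: the functional variant is invoked without `out`, and its result is then committed
    // into `out` via replace_/commit_update/sync. If `out` is not a functional tensor but some other
    // argument is, the kernel raises the "mutating a non-functional tensor with a functional tensor"
    // error; if nothing is functional, it simply redispatches to the original out= operator.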

    void _foreach_clamp_min__List(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_clamp_min__List::call(self_meta, other_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_clamp_min__List::call(self_, other_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_clamp_min_List::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }
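
    // Note [Meta-tensor reference run] (illustrative, not generated by torchgen)
    // The `if (true && !disable_meta_reference())` prologue in the in-place kernels replays the original
    // mutable op on meta tensors before it is functionalized, so shape/broadcasting errors surface with
    // the in-place semantics the user actually wrote (e.g. a foreach op rejecting mismatched list
    // lengths) rather than with the semantics of the functional variant that really runs. The out=
    // kernels skip this check (`if (false && ...)`) because, per the generated comment, only in-place
    // ops are currently guaranteed to support meta tensors.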

    void _foreach_clamp_min_out_ScalarList_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_clamp_min_ScalarList_out::call(self_meta, scalars, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_clamp_min_ScalarList_out::call(self_, scalars, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_clamp_min_ScalarList::call(self_, scalars);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_clamp_min__ScalarList(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_clamp_min__ScalarList::call(self_meta, scalars);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_clamp_min__ScalarList::call(self_, scalars);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_clamp_min_ScalarList::call(self_, scalars);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }
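
    // Note [Committing the functional result] (illustrative, not generated by torchgen)
    // The epilogue shared by all of these kernels -- replace_, commit_update, sync -- is what makes the
    // mutation observable: replace_ swaps the freshly computed tensors into the FunctionalTensorWrapper,
    // commit_update records the write so that aliases/views can be regenerated, and sync applies any
    // pending updates. The final propagate_xla_data_direct call forwards backend-specific (XLA) data
    // from the previous inner tensors to the updated ones.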

    void _foreach_maximum_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_maximum_Scalar_out::call(self_meta, scalar, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_maximum_Scalar_out::call(self_, scalar, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_maximum_Scalar::call(self_, scalar);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_maximum__Scalar(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_maximum__Scalar::call(self_meta, scalar);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_maximum__Scalar::call(self_, scalar);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_maximum_Scalar::call(self_, scalar);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_maximum_out_List_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_maximum_List_out::call(self_meta, other_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_maximum_List_out::call(self_, other_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_maximum_List::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_maximum__List(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_maximum__List::call(self_meta, other_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_maximum__List::call(self_, other_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_maximum_List::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_maximum_out_ScalarList_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_maximum_ScalarList_out::call(self_meta, scalars, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_maximum_ScalarList_out::call(self_, scalars, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_maximum_ScalarList::call(self_, scalars);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_maximum__ScalarList(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_maximum__ScalarList::call(self_meta, scalars);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_maximum__ScalarList::call(self_, scalars);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_maximum_ScalarList::call(self_, scalars);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_minimum_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_minimum_Scalar_out::call(self_meta, scalar, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_minimum_Scalar_out::call(self_, scalar, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_minimum_Scalar::call(self_, scalar);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_minimum__Scalar(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_minimum__Scalar::call(self_meta, scalar);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_minimum__Scalar::call(self_, scalar);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_minimum_Scalar::call(self_, scalar);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_minimum_out_List_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_minimum_List_out::call(self_meta, other_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_minimum_List_out::call(self_, other_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_minimum_List::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_minimum__List(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_minimum__List::call(self_meta, other_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_minimum__List::call(self_, other_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_minimum_List::call(self_, other_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_minimum_out_ScalarList_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_minimum_ScalarList_out::call(self_meta, scalars, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_minimum_ScalarList_out::call(self_, scalars, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_minimum_ScalarList::call(self_, scalars);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_minimum__ScalarList(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_minimum__ScalarList::call(self_meta, scalars);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_minimum__ScalarList::call(self_, scalars);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_minimum_ScalarList::call(self_, scalars);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_addcdiv_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto tensor1_meta = to_meta(tensor1);
        auto tensor2_meta = to_meta(tensor2);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_addcdiv_Scalar_out::call(self_meta, tensor1_meta, tensor2_meta, value, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> tensor1_;
      if (at::functionalization::impl::isFunctionalTensor(tensor1)) {
        at::functionalization::impl::sync(tensor1);
        tensor1_ = at::functionalization::impl::from_functional_tensor(tensor1);
      } else {
        tensor1_ = tensor1.vec();
      }
      
      ::std::vector<at::Tensor> tensor2_;
      if (at::functionalization::impl::isFunctionalTensor(tensor2)) {
        at::functionalization::impl::sync(tensor2);
        tensor2_ = at::functionalization::impl::from_functional_tensor(tensor2);
      } else {
        tensor2_ = tensor2.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(tensor1) || at::functionalization::impl::isFunctionalTensor(tensor2))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_addcdiv_Scalar_out::call(self_, tensor1_, tensor2_, value, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_addcdiv_Scalar::call(self_, tensor1_, tensor2_, value);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_addcdiv__Scalar(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto tensor1_meta = to_meta(tensor1);
        auto tensor2_meta = to_meta(tensor2);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_addcdiv__Scalar::call(self_meta, tensor1_meta, tensor2_meta, value);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> tensor1_;
      if (at::functionalization::impl::isFunctionalTensor(tensor1)) {
        at::functionalization::impl::sync(tensor1);
        tensor1_ = at::functionalization::impl::from_functional_tensor(tensor1);
      } else {
        tensor1_ = tensor1.vec();
      }
      
      ::std::vector<at::Tensor> tensor2_;
      if (at::functionalization::impl::isFunctionalTensor(tensor2)) {
        at::functionalization::impl::sync(tensor2);
        tensor2_ = at::functionalization::impl::from_functional_tensor(tensor2);
      } else {
        tensor2_ = tensor2.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(tensor1) || at::functionalization::impl::isFunctionalTensor(tensor2))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_addcdiv__Scalar::call(self_, tensor1_, tensor2_, value);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_addcdiv_Scalar::call(self_, tensor1_, tensor2_, value);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_addcdiv_out_ScalarList_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto tensor1_meta = to_meta(tensor1);
        auto tensor2_meta = to_meta(tensor2);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_addcdiv_ScalarList_out::call(self_meta, tensor1_meta, tensor2_meta, scalars, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> tensor1_;
      if (at::functionalization::impl::isFunctionalTensor(tensor1)) {
        at::functionalization::impl::sync(tensor1);
        tensor1_ = at::functionalization::impl::from_functional_tensor(tensor1);
      } else {
        tensor1_ = tensor1.vec();
      }
      
      ::std::vector<at::Tensor> tensor2_;
      if (at::functionalization::impl::isFunctionalTensor(tensor2)) {
        at::functionalization::impl::sync(tensor2);
        tensor2_ = at::functionalization::impl::from_functional_tensor(tensor2);
      } else {
        tensor2_ = tensor2.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(tensor1) || at::functionalization::impl::isFunctionalTensor(tensor2))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_addcdiv_ScalarList_out::call(self_, tensor1_, tensor2_, scalars, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_addcdiv_ScalarList::call(self_, tensor1_, tensor2_, scalars);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }
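
    // The in-place variants below (e.g. _foreach_addcdiv__ScalarList) differ from the out=
    // wrappers above in two ways: the meta-tensor pre-check is enabled (the leading `true &&`),
    // and it is `self` rather than `out` that gets unwrapped, recomputed via the functional
    // variant, and committed back into its wrapper.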

    void _foreach_addcdiv__ScalarList(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto tensor1_meta = to_meta(tensor1);
        auto tensor2_meta = to_meta(tensor2);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_addcdiv__ScalarList::call(self_meta, tensor1_meta, tensor2_meta, scalars);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> tensor1_;
      if (at::functionalization::impl::isFunctionalTensor(tensor1)) {
        at::functionalization::impl::sync(tensor1);
        tensor1_ = at::functionalization::impl::from_functional_tensor(tensor1);
      } else {
        tensor1_ = tensor1.vec();
      }
      
      ::std::vector<at::Tensor> tensor2_;
      if (at::functionalization::impl::isFunctionalTensor(tensor2)) {
        at::functionalization::impl::sync(tensor2);
        tensor2_ = at::functionalization::impl::from_functional_tensor(tensor2);
      } else {
        tensor2_ = tensor2.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(tensor1) || at::functionalization::impl::isFunctionalTensor(tensor2))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_addcdiv__ScalarList::call(self_, tensor1_, tensor2_, scalars);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_addcdiv_ScalarList::call(self_, tensor1_, tensor2_, scalars);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }
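
    // The Tensor-scalars overloads below additionally unwrap `scalars`, and they skip the
    // "functional input mutating a non-functional output" assert whenever `scalars` lives on
    // an XLA device, per the comment in the check below: a cross-device copy such as
    // cpu_tensor.copy_(xla_tensor) is valid, so those calls simply redispatch instead.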

    void _foreach_addcdiv_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto tensor1_meta = to_meta(tensor1);
        auto tensor2_meta = to_meta(tensor2);
        auto scalars_meta = to_meta(scalars);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_addcdiv_Tensor_out::call(self_meta, tensor1_meta, tensor2_meta, scalars_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> tensor1_;
      if (at::functionalization::impl::isFunctionalTensor(tensor1)) {
        at::functionalization::impl::sync(tensor1);
        tensor1_ = at::functionalization::impl::from_functional_tensor(tensor1);
      } else {
        tensor1_ = tensor1.vec();
      }
      
      ::std::vector<at::Tensor> tensor2_;
      if (at::functionalization::impl::isFunctionalTensor(tensor2)) {
        at::functionalization::impl::sync(tensor2);
        tensor2_ = at::functionalization::impl::from_functional_tensor(tensor2);
      } else {
        tensor2_ = tensor2.vec();
      }
      
      at::Tensor scalars_;
      if (at::functionalization::impl::isFunctionalTensor(scalars)) {
        at::functionalization::impl::sync(scalars);
        scalars_ = at::functionalization::impl::from_functional_tensor(scalars);
      } else {
        scalars_ = scalars;
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || scalars.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(tensor1) || at::functionalization::impl::isFunctionalTensor(tensor2) || at::functionalization::impl::isFunctionalTensor(scalars))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_addcdiv_Tensor_out::call(self_, tensor1_, tensor2_, scalars_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_addcdiv_Tensor::call(self_, tensor1_, tensor2_, scalars_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_addcdiv__Tensor(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto tensor1_meta = to_meta(tensor1);
        auto tensor2_meta = to_meta(tensor2);
        auto scalars_meta = to_meta(scalars);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_addcdiv__Tensor::call(self_meta, tensor1_meta, tensor2_meta, scalars_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> tensor1_;
      if (at::functionalization::impl::isFunctionalTensor(tensor1)) {
        at::functionalization::impl::sync(tensor1);
        tensor1_ = at::functionalization::impl::from_functional_tensor(tensor1);
      } else {
        tensor1_ = tensor1.vec();
      }
      
      ::std::vector<at::Tensor> tensor2_;
      if (at::functionalization::impl::isFunctionalTensor(tensor2)) {
        at::functionalization::impl::sync(tensor2);
        tensor2_ = at::functionalization::impl::from_functional_tensor(tensor2);
      } else {
        tensor2_ = tensor2.vec();
      }
      
      at::Tensor scalars_;
      if (at::functionalization::impl::isFunctionalTensor(scalars)) {
        at::functionalization::impl::sync(scalars);
        scalars_ = at::functionalization::impl::from_functional_tensor(scalars);
      } else {
        scalars_ = scalars;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || scalars.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(tensor1) || at::functionalization::impl::isFunctionalTensor(tensor2) || at::functionalization::impl::isFunctionalTensor(scalars))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_addcdiv__Tensor::call(self_, tensor1_, tensor2_, scalars_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_addcdiv_Tensor::call(self_, tensor1_, tensor2_, scalars_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_addcmul_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto tensor1_meta = to_meta(tensor1);
        auto tensor2_meta = to_meta(tensor2);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_addcmul_Scalar_out::call(self_meta, tensor1_meta, tensor2_meta, value, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> tensor1_;
      if (at::functionalization::impl::isFunctionalTensor(tensor1)) {
        at::functionalization::impl::sync(tensor1);
        tensor1_ = at::functionalization::impl::from_functional_tensor(tensor1);
      } else {
        tensor1_ = tensor1.vec();
      }
      
      ::std::vector<at::Tensor> tensor2_;
      if (at::functionalization::impl::isFunctionalTensor(tensor2)) {
        at::functionalization::impl::sync(tensor2);
        tensor2_ = at::functionalization::impl::from_functional_tensor(tensor2);
      } else {
        tensor2_ = tensor2.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(tensor1) || at::functionalization::impl::isFunctionalTensor(tensor2))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_addcmul_Scalar_out::call(self_, tensor1_, tensor2_, value, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_addcmul_Scalar::call(self_, tensor1_, tensor2_, value);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_addcmul__Scalar(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto tensor1_meta = to_meta(tensor1);
        auto tensor2_meta = to_meta(tensor2);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_addcmul__Scalar::call(self_meta, tensor1_meta, tensor2_meta, value);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> tensor1_;
      if (at::functionalization::impl::isFunctionalTensor(tensor1)) {
        at::functionalization::impl::sync(tensor1);
        tensor1_ = at::functionalization::impl::from_functional_tensor(tensor1);
      } else {
        tensor1_ = tensor1.vec();
      }
      
      ::std::vector<at::Tensor> tensor2_;
      if (at::functionalization::impl::isFunctionalTensor(tensor2)) {
        at::functionalization::impl::sync(tensor2);
        tensor2_ = at::functionalization::impl::from_functional_tensor(tensor2);
      } else {
        tensor2_ = tensor2.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(tensor1) || at::functionalization::impl::isFunctionalTensor(tensor2))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_addcmul__Scalar::call(self_, tensor1_, tensor2_, value);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_addcmul_Scalar::call(self_, tensor1_, tensor2_, value);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_addcmul_out_ScalarList_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto tensor1_meta = to_meta(tensor1);
        auto tensor2_meta = to_meta(tensor2);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_addcmul_ScalarList_out::call(self_meta, tensor1_meta, tensor2_meta, scalars, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> tensor1_;
      if (at::functionalization::impl::isFunctionalTensor(tensor1)) {
        at::functionalization::impl::sync(tensor1);
        tensor1_ = at::functionalization::impl::from_functional_tensor(tensor1);
      } else {
        tensor1_ = tensor1.vec();
      }
      
      ::std::vector<at::Tensor> tensor2_;
      if (at::functionalization::impl::isFunctionalTensor(tensor2)) {
        at::functionalization::impl::sync(tensor2);
        tensor2_ = at::functionalization::impl::from_functional_tensor(tensor2);
      } else {
        tensor2_ = tensor2.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(tensor1) || at::functionalization::impl::isFunctionalTensor(tensor2))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_addcmul_ScalarList_out::call(self_, tensor1_, tensor2_, scalars, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_addcmul_ScalarList::call(self_, tensor1_, tensor2_, scalars);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_addcmul__ScalarList(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto tensor1_meta = to_meta(tensor1);
        auto tensor2_meta = to_meta(tensor2);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_addcmul__ScalarList::call(self_meta, tensor1_meta, tensor2_meta, scalars);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> tensor1_;
      if (at::functionalization::impl::isFunctionalTensor(tensor1)) {
        at::functionalization::impl::sync(tensor1);
        tensor1_ = at::functionalization::impl::from_functional_tensor(tensor1);
      } else {
        tensor1_ = tensor1.vec();
      }
      
      ::std::vector<at::Tensor> tensor2_;
      if (at::functionalization::impl::isFunctionalTensor(tensor2)) {
        at::functionalization::impl::sync(tensor2);
        tensor2_ = at::functionalization::impl::from_functional_tensor(tensor2);
      } else {
        tensor2_ = tensor2.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(tensor1) || at::functionalization::impl::isFunctionalTensor(tensor2))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_addcmul__ScalarList::call(self_, tensor1_, tensor2_, scalars);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_addcmul_ScalarList::call(self_, tensor1_, tensor2_, scalars);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_addcmul_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto tensor1_meta = to_meta(tensor1);
        auto tensor2_meta = to_meta(tensor2);
        auto scalars_meta = to_meta(scalars);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_addcmul_Tensor_out::call(self_meta, tensor1_meta, tensor2_meta, scalars_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> tensor1_;
      if (at::functionalization::impl::isFunctionalTensor(tensor1)) {
        at::functionalization::impl::sync(tensor1);
        tensor1_ = at::functionalization::impl::from_functional_tensor(tensor1);
      } else {
        tensor1_ = tensor1.vec();
      }
      
      ::std::vector<at::Tensor> tensor2_;
      if (at::functionalization::impl::isFunctionalTensor(tensor2)) {
        at::functionalization::impl::sync(tensor2);
        tensor2_ = at::functionalization::impl::from_functional_tensor(tensor2);
      } else {
        tensor2_ = tensor2.vec();
      }
      
      at::Tensor scalars_;
      if (at::functionalization::impl::isFunctionalTensor(scalars)) {
        at::functionalization::impl::sync(scalars);
        scalars_ = at::functionalization::impl::from_functional_tensor(scalars);
      } else {
        scalars_ = scalars;
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || scalars.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(tensor1) || at::functionalization::impl::isFunctionalTensor(tensor2) || at::functionalization::impl::isFunctionalTensor(scalars))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_addcmul_Tensor_out::call(self_, tensor1_, tensor2_, scalars_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_addcmul_Tensor::call(self_, tensor1_, tensor2_, scalars_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_addcmul__Tensor(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto tensor1_meta = to_meta(tensor1);
        auto tensor2_meta = to_meta(tensor2);
        auto scalars_meta = to_meta(scalars);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_addcmul__Tensor::call(self_meta, tensor1_meta, tensor2_meta, scalars_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> tensor1_;
      if (at::functionalization::impl::isFunctionalTensor(tensor1)) {
        at::functionalization::impl::sync(tensor1);
        tensor1_ = at::functionalization::impl::from_functional_tensor(tensor1);
      } else {
        tensor1_ = tensor1.vec();
      }
      
      ::std::vector<at::Tensor> tensor2_;
      if (at::functionalization::impl::isFunctionalTensor(tensor2)) {
        at::functionalization::impl::sync(tensor2);
        tensor2_ = at::functionalization::impl::from_functional_tensor(tensor2);
      } else {
        tensor2_ = tensor2.vec();
      }
      
      at::Tensor scalars_;
      if (at::functionalization::impl::isFunctionalTensor(scalars)) {
        at::functionalization::impl::sync(scalars);
        scalars_ = at::functionalization::impl::from_functional_tensor(scalars);
      } else {
        scalars_ = scalars;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || scalars.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(tensor1) || at::functionalization::impl::isFunctionalTensor(tensor2) || at::functionalization::impl::isFunctionalTensor(scalars))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_addcmul__Tensor::call(self_, tensor1_, tensor2_, scalars_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_addcmul_Tensor::call(self_, tensor1_, tensor2_, scalars_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }
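
    // Note: these wrappers only take effect once they are registered for the Functionalize
    // dispatch key. Registration is not shown here; as a rough, hypothetical sketch of what
    // such a block typically looks like (exact operator names and namespaces may differ from
    // the real registrations later in this file):
    //
    //   TORCH_LIBRARY_IMPL(aten, Functionalize, m) {
    //     m.impl("_foreach_abs_", TORCH_FN(functionalization::_foreach_abs_));
    //   }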

    void _foreach_abs_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_abs_out::call(self_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_abs_out::call(self_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_abs::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }
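
    // For unary in-place foreach ops such as _foreach_abs_ below, `self` is the only tensor
    // argument, so the "mutating a non-functional tensor with a functional tensor" check
    // degenerates to `(false)` and can never fire; a non-functional `self` simply redispatches.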

    void _foreach_abs_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_abs_::call(self_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_abs_::call(self_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_abs::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }
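
    // Rough usage sketch (assumed typical usage, not part of this generated file): kernels
    // like _foreach_abs_ above are reached when a mutating foreach op runs under
    // functionalization, e.g. driven from Python via torch.func.functionalize:
    //
    //   def f(xs):
    //       torch._foreach_abs_(xs)   # in-place; rewritten to _foreach_abs + commit_update
    //       return xs
    //   ys = torch.func.functionalize(f)([torch.randn(3)])
    //
    // Inside functionalize(), the inputs are wrapped as functional tensors, so the
    // isFunctionalTensor(self) branch above takes the functional path.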

    void _foreach_acos_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_acos_out::call(self_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_acos_out::call(self_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_acos::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_acos_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_acos_::call(self_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_acos_::call(self_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_acos::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_asin_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_asin_out::call(self_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_asin_out::call(self_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_asin::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_asin_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_asin_::call(self_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_asin_::call(self_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_asin::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_atan_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_atan_out::call(self_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_atan_out::call(self_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_atan::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_atan_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_atan_::call(self_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_atan_::call(self_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_atan::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_ceil_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_ceil_out::call(self_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_ceil_out::call(self_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_ceil::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_ceil_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_ceil_::call(self_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_ceil_::call(self_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_ceil::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_cos_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_cos_out::call(self_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_cos_out::call(self_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_cos::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_cos_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_cos_::call(self_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_cos_::call(self_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_cos::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_cosh_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_cosh_out::call(self_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_cosh_out::call(self_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_cosh::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_cosh_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_cosh_::call(self_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_cosh_::call(self_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_cosh::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_erf_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_erf_out::call(self_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_erf_out::call(self_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_erf::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_erf_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_erf_::call(self_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_erf_::call(self_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_erf::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_erfc_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_erfc_out::call(self_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_erfc_out::call(self_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_erfc::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_erfc_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_erfc_::call(self_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_erfc_::call(self_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_erfc::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_exp_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_exp_out::call(self_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_exp_out::call(self_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_exp::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_exp_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_exp_::call(self_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_exp_::call(self_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_exp::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_expm1_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_expm1_out::call(self_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_expm1_out::call(self_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_expm1::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_expm1_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_expm1_::call(self_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_expm1_::call(self_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_expm1::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_floor_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_floor_out::call(self_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_floor_out::call(self_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_floor::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_floor_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_floor_::call(self_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_floor_::call(self_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_floor::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_frac_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_frac_out::call(self_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_frac_out::call(self_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_frac::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_frac_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_frac_::call(self_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_frac_::call(self_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_frac::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_lerp_out_List_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, at::TensorList weights, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto tensors1_meta = to_meta(tensors1);
        auto weights_meta = to_meta(weights);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_lerp_List_out::call(self_meta, tensors1_meta, weights_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> tensors1_;
      if (at::functionalization::impl::isFunctionalTensor(tensors1)) {
        at::functionalization::impl::sync(tensors1);
        tensors1_ = at::functionalization::impl::from_functional_tensor(tensors1);
      } else {
        tensors1_ = tensors1.vec();
      }
      
      ::std::vector<at::Tensor> weights_;
      if (at::functionalization::impl::isFunctionalTensor(weights)) {
        at::functionalization::impl::sync(weights);
        weights_ = at::functionalization::impl::from_functional_tensor(weights);
      } else {
        weights_ = weights.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(tensors1) || at::functionalization::impl::isFunctionalTensor(weights))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_lerp_List_out::call(self_, tensors1_, weights_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_lerp_List::call(self_, tensors1_, weights_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }
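
    // The _foreach_lerp wrappers in this block (List/Scalar overloads, e.g.
    // _foreach_lerp_out_List_out above) extend the same pattern to multiple arguments:
    // every extra TensorList (tensors1, weights) is unwrapped alongside self/out, Scalar
    // arguments are forwarded unchanged, and the "case 1" error now also fires when any of
    // those other arguments is functional while the mutated list is not.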

    void _foreach_lerp__List(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, at::TensorList weights) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto tensors1_meta = to_meta(tensors1);
        auto weights_meta = to_meta(weights);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_lerp__List::call(self_meta, tensors1_meta, weights_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> tensors1_;
      if (at::functionalization::impl::isFunctionalTensor(tensors1)) {
        at::functionalization::impl::sync(tensors1);
        tensors1_ = at::functionalization::impl::from_functional_tensor(tensors1);
      } else {
        tensors1_ = tensors1.vec();
      }
      
      ::std::vector<at::Tensor> weights_;
      if (at::functionalization::impl::isFunctionalTensor(weights)) {
        at::functionalization::impl::sync(weights);
        weights_ = at::functionalization::impl::from_functional_tensor(weights);
      } else {
        weights_ = weights.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(tensors1) || at::functionalization::impl::isFunctionalTensor(weights))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_lerp__List::call(self_, tensors1_, weights_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_lerp_List::call(self_, tensors1_, weights_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_lerp_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, const at::Scalar & weight, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto tensors1_meta = to_meta(tensors1);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_lerp_Scalar_out::call(self_meta, tensors1_meta, weight, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> tensors1_;
      if (at::functionalization::impl::isFunctionalTensor(tensors1)) {
        at::functionalization::impl::sync(tensors1);
        tensors1_ = at::functionalization::impl::from_functional_tensor(tensors1);
      } else {
        tensors1_ = tensors1.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(tensors1))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_lerp_Scalar_out::call(self_, tensors1_, weight, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_lerp_Scalar::call(self_, tensors1_, weight);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }
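
    // Note [Functionalization pattern for out= foreach ops]
    // The out= overloads are handled the same way as the in-place ones, except that the
    // write-back targets `out` instead of `self`: the functional variant produces
    // tmp_output, which is committed into the wrappers of `out` via replace_ /
    // commit_update / sync, and propagate_xla_data_direct then carries the old inner
    // tensors' XLA data over to the updated ones.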

    void _foreach_lerp__Scalar(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, const at::Scalar & weight) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto tensors1_meta = to_meta(tensors1);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_lerp__Scalar::call(self_meta, tensors1_meta, weight);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> tensors1_;
      if (at::functionalization::impl::isFunctionalTensor(tensors1)) {
        at::functionalization::impl::sync(tensors1);
        tensors1_ = at::functionalization::impl::from_functional_tensor(tensors1);
      } else {
        tensors1_ = tensors1.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(tensors1))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_lerp__Scalar::call(self_, tensors1_, weight);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_lerp_Scalar::call(self_, tensors1_, weight);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }
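
    // Note [Meta reference pre-check in these kernels]
    // The `if (true && !disable_meta_reference())` prologue is only enabled for the in-place
    // overloads (the out= overloads are generated with a constant `false`, so the block is
    // dead code there). It replays the original mutable op on meta tensors to surface shape
    // errors that the functional rewrite below would not report. AutoDispatchSkipFunctionalize
    // keeps that call (and the later redispatches) from re-entering this functionalization
    // kernel, and the ExcludeDispatchKeyGuard additionally masks exclude_keys_for_meta_dispatch
    // for the meta call.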

    void _foreach_lerp_out_ScalarList_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, at::ArrayRef<at::Scalar> weight, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto tensors1_meta = to_meta(tensors1);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_lerp_ScalarList_out::call(self_meta, tensors1_meta, weight, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> tensors1_;
      if (at::functionalization::impl::isFunctionalTensor(tensors1)) {
        at::functionalization::impl::sync(tensors1);
        tensors1_ = at::functionalization::impl::from_functional_tensor(tensors1);
      } else {
        tensors1_ = tensors1.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(tensors1))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_lerp_ScalarList_out::call(self_, tensors1_, weight, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_lerp_ScalarList::call(self_, tensors1_, weight);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_lerp__ScalarList(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, at::ArrayRef<at::Scalar> weight) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto tensors1_meta = to_meta(tensors1);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_lerp__ScalarList::call(self_meta, tensors1_meta, weight);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> tensors1_;
      if (at::functionalization::impl::isFunctionalTensor(tensors1)) {
        at::functionalization::impl::sync(tensors1);
        tensors1_ = at::functionalization::impl::from_functional_tensor(tensors1);
      } else {
        tensors1_ = tensors1.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(tensors1))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_lerp__ScalarList::call(self_, tensors1_, weight);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_lerp_ScalarList::call(self_, tensors1_, weight);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_lgamma_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_lgamma_out::call(self_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_lgamma_out::call(self_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_lgamma::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_lgamma_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_lgamma_::call(self_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_lgamma_::call(self_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_lgamma::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_log_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_log_out::call(self_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_log_out::call(self_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_log::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_log_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_log_::call(self_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_log_::call(self_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_log::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_log10_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_log10_out::call(self_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_log10_out::call(self_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_log10::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_log10_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_log10_::call(self_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_log10_::call(self_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_log10::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_log1p_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_log1p_out::call(self_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_log1p_out::call(self_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_log1p::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_log1p_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_log1p_::call(self_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_log1p_::call(self_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_log1p::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_log2_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_log2_out::call(self_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_log2_out::call(self_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_log2::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_log2_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_log2_::call(self_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_log2_::call(self_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_log2::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_max_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_max_out::call(self_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_max_out::call(self_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_max::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_neg_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_neg_out::call(self_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_neg_out::call(self_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_neg::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_neg_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_neg_::call(self_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_neg_::call(self_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_neg::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_norm_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & ord, ::std::optional<at::ScalarType> dtype, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_norm_Scalar_out::call(self_meta, ord, dtype, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_norm_Scalar_out::call(self_, ord, dtype, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_norm_Scalar::call(self_, ord, dtype);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_pow_out_List_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList exponent, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto exponent_meta = to_meta(exponent);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_pow_List_out::call(self_meta, exponent_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> exponent_;
      if (at::functionalization::impl::isFunctionalTensor(exponent)) {
        at::functionalization::impl::sync(exponent);
        exponent_ = at::functionalization::impl::from_functional_tensor(exponent);
      } else {
        exponent_ = exponent.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(exponent))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_pow_List_out::call(self_, exponent_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_pow_List::call(self_, exponent_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_pow__List(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList exponent) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto exponent_meta = to_meta(exponent);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_pow__List::call(self_meta, exponent_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> exponent_;
      if (at::functionalization::impl::isFunctionalTensor(exponent)) {
        at::functionalization::impl::sync(exponent);
        exponent_ = at::functionalization::impl::from_functional_tensor(exponent);
      } else {
        exponent_ = exponent.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(exponent))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_pow__List::call(self_, exponent_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_pow_List::call(self_, exponent_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_pow_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & exponent, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_pow_Scalar_out::call(self_meta, exponent, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_pow_Scalar_out::call(self_, exponent, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_pow_Scalar::call(self_, exponent);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_pow__Scalar(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & exponent) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_pow__Scalar::call(self_meta, exponent);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_pow__Scalar::call(self_, exponent);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_pow_Scalar::call(self_, exponent);
        }
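        // Same commit sequence as the out= kernels, but the functional result is
        // written back into the mutated `self` wrappers.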
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_pow_out_ScalarList_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> exponent, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_pow_ScalarList_out::call(self_meta, exponent, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_pow_ScalarList_out::call(self_, exponent, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_pow_ScalarList::call(self_, exponent);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_pow__ScalarList(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> exponent) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_pow__ScalarList::call(self_meta, exponent);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_pow__ScalarList::call(self_, exponent);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_pow_ScalarList::call(self_, exponent);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_reciprocal_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_reciprocal_out::call(self_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_reciprocal_out::call(self_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_reciprocal::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_reciprocal_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_reciprocal_::call(self_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_reciprocal_::call(self_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_reciprocal::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }
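
    /*
      Illustrative sketch only (not part of the generated registrations): one way the
      _foreach_reciprocal_ kernel above can be reached. The calls are standard ATen /
      functionalization APIs already used in this file; the surrounding setup is a
      hypothetical example, not the canonical entry point.

        at::Tensor a = at::rand({3});
        at::Tensor fa = at::functionalization::impl::to_functional_tensor(a);
        {
          // With the Functionalize key active, the in-place op dispatches to
          // _foreach_reciprocal_ above, which runs the functional variant and
          // commits the result back into the wrapper.
          c10::impl::IncludeDispatchKeyGuard guard(c10::DispatchKey::Functionalize);
          at::_foreach_reciprocal_({fa});
        }
        at::functionalization::impl::sync(fa);
    */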

    void _foreach_round_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_round_out::call(self_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_round_out::call(self_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_round::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_round_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_round_::call(self_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_round_::call(self_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_round::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_rsqrt_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_rsqrt_out::call(self_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_rsqrt_out::call(self_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_rsqrt::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_rsqrt_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_rsqrt_::call(self_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_rsqrt_::call(self_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_rsqrt::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_sigmoid_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_sigmoid_out::call(self_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_sigmoid_out::call(self_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_sigmoid::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_sigmoid_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_sigmoid_::call(self_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_sigmoid_::call(self_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_sigmoid::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_sign_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_sign_out::call(self_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_sign_out::call(self_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_sign::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_sign_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_sign_::call(self_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_sign_::call(self_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_sign::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_sin_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_sin_out::call(self_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_sin_out::call(self_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_sin::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_sin_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_sin_::call(self_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_sin_::call(self_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_sin::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_sinh_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_sinh_out::call(self_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_sinh_out::call(self_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_sinh::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_sinh_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_sinh_::call(self_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_sinh_::call(self_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_sinh::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_sqrt_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_sqrt_out::call(self_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_sqrt_out::call(self_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_sqrt::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_sqrt_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_sqrt_::call(self_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_sqrt_::call(self_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_sqrt::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_tan_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_tan_out::call(self_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_tan_out::call(self_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_tan::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_tan_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_tan_::call(self_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_tan_::call(self_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_tan::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_tanh_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_tanh_out::call(self_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_tanh_out::call(self_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_tanh::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }
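
    // Editorial note, not torchgen output: a sketch of what the meta-reference
    // preamble at the top of each kernel performs when its constant guard is
    // enabled -- replay the mutable op on meta tensors so shape errors surface
    // before the functional rewrite. The helper name is hypothetical; the choice
    // of _foreach_tanh_ as the example op is illustrative.
    [[maybe_unused]] static void meta_reference_check_sketch(at::TensorList self) {
      if (disable_meta_reference()) {
        return;
      }
      auto self_meta = to_meta(self);
      at::AutoDispatchSkipFunctionalize func_guard;
      c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
      at::_ops::_foreach_tanh_::call(self_meta);
    }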

    void _foreach_tanh_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_tanh_::call(self_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_tanh_::call(self_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_tanh::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }
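
    // Editorial note, not torchgen output: the branch shared by the mutable kernels
    // in this file, reduced to its two cases. If the mutated argument is not a
    // functional tensor, mutating it from a functional tensor is an error (case 1);
    // otherwise the call is redispatched as-is below the Functionalize key (case 2).
    // The helper name and the use of _foreach_tanh_ here are illustrative only.
    [[maybe_unused]] static void mutate_or_redispatch_sketch(at::TensorList self) {
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        at::AutoDispatchSkipFunctionalize guard;
        at::_ops::_foreach_tanh_::call(self);
      }
    }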

    void _foreach_trunc_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_trunc_out::call(self_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_trunc_out::call(self_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_trunc::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }
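
    // Editorial note, not torchgen output: the write-back epilogue used after
    // computing tmp_output in the kernels in this file -- commit the functional
    // result into the wrapper of the mutated argument and propagate XLA data from
    // the old inner tensor to the new one. Shown here with the single-Tensor
    // overloads; the foreach kernels above use the TensorList overloads of the
    // same helpers. The helper name is hypothetical.
    [[maybe_unused]] static void write_back_sketch(const at::Tensor & out, const at::Tensor & tmp_output) {
      auto out_inner = at::functionalization::impl::from_functional_tensor(out);
      at::functionalization::impl::replace_(out, tmp_output);
      at::functionalization::impl::commit_update(out);
      at::functionalization::impl::sync(out);
      auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
      at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
    }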

    void _foreach_trunc_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_trunc_::call(self_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_trunc_::call(self_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_trunc::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_zero_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_zero_out::call(self_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_zero_out::call(self_, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_zero::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_zero_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_zero_::call(self_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_zero_::call(self_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_zero::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    void _foreach_copy_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList src, bool non_blocking, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto src_meta = to_meta(src);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_copy_out::call(self_meta, src_meta, non_blocking, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> src_;
      if (at::functionalization::impl::isFunctionalTensor(src)) {
        at::functionalization::impl::sync(src);
        src_ = at::functionalization::impl::from_functional_tensor(src);
      } else {
        src_ = src.vec();
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(src))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_copy_out::call(self_, src_, non_blocking, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_copy::call(self_, src_, non_blocking);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _foreach_copy_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList src, bool non_blocking) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto src_meta = to_meta(src);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foreach_copy_::call(self_meta, src_meta, non_blocking);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> src_;
      if (at::functionalization::impl::isFunctionalTensor(src)) {
        at::functionalization::impl::sync(src);
        src_ = at::functionalization::impl::from_functional_tensor(src);
      } else {
        src_ = src.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(src))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_foreach_copy_::call(self_, src_, non_blocking);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foreach_copy::call(self_, src_, non_blocking);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
      }
    }

    at::Tensor & bucketize_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & boundaries, bool out_int32, bool right, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto boundaries_meta = to_meta(boundaries);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::bucketize_Tensor_out::call(self_meta, boundaries_meta, out_int32, right, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor boundaries_;
      if (at::functionalization::impl::isFunctionalTensor(boundaries)) {
        at::functionalization::impl::sync(boundaries);
        boundaries_ = at::functionalization::impl::from_functional_tensor(boundaries);
      } else {
        boundaries_ = boundaries;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || boundaries.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(boundaries))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::bucketize_Tensor_out::call(self_, boundaries_, out_int32, right, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::bucketize_Tensor::call(self_, boundaries_, out_int32, right);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
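
    // Editorial note, not torchgen output: the device predicate that disables the
    // case-1 assert in kernels like bucketize_out_Tensor_out above. When any input
    // lives on XLA, mutating a non-functional tensor from a functional one is
    // permitted (e.g. cpu_tensor.copy_(xla_tensor)), so the check is skipped.
    // The helper name is hypothetical.
    [[maybe_unused]] static bool any_input_on_xla_sketch(const at::Tensor & a, const at::Tensor & b) {
      return a.device().type() == c10::DeviceType::XLA ||
             b.device().type() == c10::DeviceType::XLA;
    }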

    at::Tensor & bucketize_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & boundaries, bool out_int32, bool right, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto boundaries_meta = to_meta(boundaries);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::bucketize_Scalar_out::call(self, boundaries_meta, out_int32, right, out_meta);
      }
      
      at::Tensor boundaries_;
      if (at::functionalization::impl::isFunctionalTensor(boundaries)) {
        at::functionalization::impl::sync(boundaries);
        boundaries_ = at::functionalization::impl::from_functional_tensor(boundaries);
      } else {
        boundaries_ = boundaries;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || boundaries.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(boundaries))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::bucketize_Scalar_out::call(self, boundaries_, out_int32, right, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::bucketize_Scalar::call(self, boundaries_, out_int32, right);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & searchsorted_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32, bool right, ::std::optional<c10::string_view> side, const ::std::optional<at::Tensor> & sorter, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto sorted_sequence_meta = to_meta(sorted_sequence);
        auto self_meta = to_meta(self);
        auto sorter_meta = to_meta(sorter);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::searchsorted_Tensor_out::call(sorted_sequence_meta, self_meta, out_int32, right, side, sorter_meta, out_meta);
      }
      
      at::Tensor sorted_sequence_;
      if (at::functionalization::impl::isFunctionalTensor(sorted_sequence)) {
        at::functionalization::impl::sync(sorted_sequence);
        sorted_sequence_ = at::functionalization::impl::from_functional_tensor(sorted_sequence);
      } else {
        sorted_sequence_ = sorted_sequence;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      ::std::optional<at::Tensor> sorter_;
      if (at::functionalization::impl::isFunctionalTensor(sorter)) {
        at::functionalization::impl::sync(sorter);
        sorter_ = at::functionalization::impl::from_functional_tensor(sorter);
      } else {
        sorter_ = sorter;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || sorted_sequence.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(sorted_sequence) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(sorter))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::searchsorted_Tensor_out::call(sorted_sequence_, self_, out_int32, right, side, sorter_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::searchsorted_Tensor::call(sorted_sequence_, self_, out_int32, right, side, sorter_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & searchsorted_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32, bool right, ::std::optional<c10::string_view> side, const ::std::optional<at::Tensor> & sorter, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto sorted_sequence_meta = to_meta(sorted_sequence);
        auto sorter_meta = to_meta(sorter);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::searchsorted_Scalar_out::call(sorted_sequence_meta, self, out_int32, right, side, sorter_meta, out_meta);
      }
      
      at::Tensor sorted_sequence_;
      if (at::functionalization::impl::isFunctionalTensor(sorted_sequence)) {
        at::functionalization::impl::sync(sorted_sequence);
        sorted_sequence_ = at::functionalization::impl::from_functional_tensor(sorted_sequence);
      } else {
        sorted_sequence_ = sorted_sequence;
      }
      
      ::std::optional<at::Tensor> sorter_;
      if (at::functionalization::impl::isFunctionalTensor(sorter)) {
        at::functionalization::impl::sync(sorter);
        sorter_ = at::functionalization::impl::from_functional_tensor(sorter);
      } else {
        sorter_ = sorter;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || sorted_sequence.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(sorted_sequence) || at::functionalization::impl::isFunctionalTensor(sorter))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::searchsorted_Scalar_out::call(sorted_sequence_, self, out_int32, right, side, sorter_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::searchsorted_Scalar::call(sorted_sequence_, self, out_int32, right, side, sorter_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _convert_indices_from_coo_to_csr_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t size, bool out_int32, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_convert_indices_from_coo_to_csr_out::call(self_meta, size, out_int32, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_convert_indices_from_coo_to_csr_out::call(self_, size, out_int32, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_convert_indices_from_coo_to_csr::call(self_, size, out_int32);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _convert_indices_from_csr_to_coo_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32, bool transpose, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto crow_indices_meta = to_meta(crow_indices);
        auto col_indices_meta = to_meta(col_indices);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_convert_indices_from_csr_to_coo_out::call(crow_indices_meta, col_indices_meta, out_int32, transpose, out_meta);
      }
      
      at::Tensor crow_indices_;
      if (at::functionalization::impl::isFunctionalTensor(crow_indices)) {
        at::functionalization::impl::sync(crow_indices);
        crow_indices_ = at::functionalization::impl::from_functional_tensor(crow_indices);
      } else {
        crow_indices_ = crow_indices;
      }
      
      at::Tensor col_indices_;
      if (at::functionalization::impl::isFunctionalTensor(col_indices)) {
        at::functionalization::impl::sync(col_indices);
        col_indices_ = at::functionalization::impl::from_functional_tensor(col_indices);
      } else {
        col_indices_ = col_indices;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || crow_indices.device().type() == c10::DeviceType::XLA || col_indices.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(crow_indices) || at::functionalization::impl::isFunctionalTensor(col_indices))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_convert_indices_from_csr_to_coo_out::call(crow_indices_, col_indices_, out_int32, transpose, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_convert_indices_from_csr_to_coo::call(crow_indices_, col_indices_, out_int32, transpose);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & mse_loss_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto target_meta = to_meta(target);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::mse_loss_out::call(self_meta, target_meta, reduction, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor target_;
      if (at::functionalization::impl::isFunctionalTensor(target)) {
        at::functionalization::impl::sync(target);
        target_ = at::functionalization::impl::from_functional_tensor(target);
      } else {
        target_ = target;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || target.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::mse_loss_out::call(self_, target_, reduction, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::mse_loss::call(self_, target_, reduction);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & mse_loss_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto target_meta = to_meta(target);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::mse_loss_backward_grad_input::call(grad_output_meta, self_meta, target_meta, reduction, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor target_;
      if (at::functionalization::impl::isFunctionalTensor(target)) {
        at::functionalization::impl::sync(target);
        target_ = at::functionalization::impl::from_functional_tensor(target);
      } else {
        target_ = target;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA || target.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::mse_loss_backward_grad_input::call(grad_output_, self_, target_, reduction, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::mse_loss_backward::call(grad_output_, self_, target_, reduction);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }

    at::Tensor & multi_margin_loss_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const ::std::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto target_meta = to_meta(target);
        auto weight_meta = to_meta(weight);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::multi_margin_loss_out::call(self_meta, target_meta, p, margin, weight_meta, reduction, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor target_;
      if (at::functionalization::impl::isFunctionalTensor(target)) {
        at::functionalization::impl::sync(target);
        target_ = at::functionalization::impl::from_functional_tensor(target);
      } else {
        target_ = target;
      }
      
      ::std::optional<at::Tensor> weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || target.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target) || at::functionalization::impl::isFunctionalTensor(weight))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::multi_margin_loss_out::call(self_, target_, p, margin, weight_, reduction, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::multi_margin_loss::call(self_, target_, p, margin, weight_, reduction);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & multi_margin_loss_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const ::std::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto target_meta = to_meta(target);
        auto weight_meta = to_meta(weight);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::multi_margin_loss_backward_grad_input::call(grad_output_meta, self_meta, target_meta, p, margin, weight_meta, reduction, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor target_;
      if (at::functionalization::impl::isFunctionalTensor(target)) {
        at::functionalization::impl::sync(target);
        target_ = at::functionalization::impl::from_functional_tensor(target);
      } else {
        target_ = target;
      }
      
      ::std::optional<at::Tensor> weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA || target.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target) || at::functionalization::impl::isFunctionalTensor(weight))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::multi_margin_loss_backward_grad_input::call(grad_output_, self_, target_, p, margin, weight_, reduction, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::multi_margin_loss_backward::call(grad_output_, self_, target_, p, margin, weight_, reduction);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }
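    // The functionalized out= kernels in this file all follow the same shape, which the
    // kernel above illustrates: sync and unwrap every functional tensor argument, run the
    // op's purely functional variant with functionalization skipped, and then commit the
    // result back into the wrapper of the mutable output. A minimal sketch of that flow
    // (illustrative only; `some_functional_op`, `input`, and `out` are stand-in names, not
    // ops or arguments from this file):
    //
    //   // assume `input` and `out` are already known to be functional tensors here
    //   at::functionalization::impl::sync(input);                               // apply any pending updates
    //   at::Tensor input_ = at::functionalization::impl::from_functional_tensor(input);
    //   at::Tensor tmp;
    //   {
    //     at::AutoDispatchSkipFunctionalize guard;   // avoid re-entering this pass
    //     tmp = some_functional_op(input_);          // functional (non-mutating) variant
    //   }
    //   at::functionalization::impl::replace_(out, tmp);      // stage the new value
    //   at::functionalization::impl::commit_update(out);      // record the mutation
    //   at::functionalization::impl::sync(out);               // make it visible to readers of `out`
    //   return out;                                           // original wrapper, now updated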

    at::Tensor & multilabel_margin_loss_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto target_meta = to_meta(target);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::multilabel_margin_loss_out::call(self_meta, target_meta, reduction, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor target_;
      if (at::functionalization::impl::isFunctionalTensor(target)) {
        at::functionalization::impl::sync(target);
        target_ = at::functionalization::impl::from_functional_tensor(target);
      } else {
        target_ = target;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || target.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::multilabel_margin_loss_out::call(self_, target_, reduction, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::multilabel_margin_loss::call(self_, target_, reduction);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
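    // In each kernel, the leading block guarded by `if (false && !disable_meta_reference())`
    // is the optional meta-reference pre-check: for ops where torchgen enables it (the
    // literal is `true` in other kernels of this file), every argument is converted to a
    // meta tensor and the original mutable op is replayed once under
    // AutoDispatchSkipFunctionalize, so shape errors that only the in-place/out= schema
    // would raise are surfaced before the op is rewritten into its functional form. For the
    // loss kernels in this section the literal is `false`, so the block is dead code and the
    // pre-check is skipped.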

    ::std::tuple<at::Tensor &,at::Tensor &> multilabel_margin_loss_forward_out_output(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & output, at::Tensor & is_target) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto target_meta = to_meta(target);
        auto output_meta = to_meta(output);
        auto is_target_meta = to_meta(is_target);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::multilabel_margin_loss_forward_output::call(self_meta, target_meta, reduction, output_meta, is_target_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor target_;
      if (at::functionalization::impl::isFunctionalTensor(target)) {
        at::functionalization::impl::sync(target);
        target_ = at::functionalization::impl::from_functional_tensor(target);
      } else {
        target_ = target;
      }
      
      at::Tensor output_;
      if (at::functionalization::impl::isFunctionalTensor(output)) {
        at::functionalization::impl::sync(output);
        output_ = at::functionalization::impl::from_functional_tensor(output);
      } else {
        output_ = output;
      }
      
      at::Tensor is_target_;
      if (at::functionalization::impl::isFunctionalTensor(is_target)) {
        at::functionalization::impl::sync(is_target);
        is_target_ = at::functionalization::impl::from_functional_tensor(is_target);
      } else {
        is_target_ = is_target;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(output) && at::functionalization::impl::isFunctionalTensor(is_target))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || target.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::multilabel_margin_loss_forward_output::call(self_, target_, reduction, output_, is_target_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(output, is_target);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::multilabel_margin_loss_forward::call(self_, target_, reduction);
        }
        auto output_inner = at::functionalization::impl::from_functional_tensor(output);
        at::functionalization::impl::replace_(output, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(output);
        at::functionalization::impl::sync(output);
        auto output_inner_updated = at::functionalization::impl::from_functional_tensor(output);
        at::functionalization::impl::propagate_xla_data_direct(output_inner, output_inner_updated);
        auto is_target_inner = at::functionalization::impl::from_functional_tensor(is_target);
        at::functionalization::impl::replace_(is_target, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(is_target);
        at::functionalization::impl::sync(is_target);
        auto is_target_inner_updated = at::functionalization::impl::from_functional_tensor(is_target);
        at::functionalization::impl::propagate_xla_data_direct(is_target_inner, is_target_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(output, is_target);
      }
    }
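    // Ops with more than one mutable output, like multilabel_margin_loss_forward above,
    // apply the same commit sequence once per output: the functional variant returns a
    // tuple, std::get<0>/std::get<1> pick out each value, and replace_/commit_update/sync
    // run separately on `output` and `is_target` before the kernel returns a tuple of
    // references to the original (now updated) wrapper arguments. Sketched with a
    // hypothetical two-output op (`some_functional_op` is a stand-in name):
    //
    //   auto results = some_functional_op(self_, target_);
    //   at::functionalization::impl::replace_(output,    std::get<0>(results));
    //   at::functionalization::impl::replace_(is_target, std::get<1>(results));
    //   // ...followed by commit_update()/sync() on each output, as above.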

    at::Tensor & multilabel_margin_loss_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto target_meta = to_meta(target);
        auto is_target_meta = to_meta(is_target);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::multilabel_margin_loss_backward_grad_input::call(grad_output_meta, self_meta, target_meta, reduction, is_target_meta, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor target_;
      if (at::functionalization::impl::isFunctionalTensor(target)) {
        at::functionalization::impl::sync(target);
        target_ = at::functionalization::impl::from_functional_tensor(target);
      } else {
        target_ = target;
      }
      
      at::Tensor is_target_;
      if (at::functionalization::impl::isFunctionalTensor(is_target)) {
        at::functionalization::impl::sync(is_target);
        is_target_ = at::functionalization::impl::from_functional_tensor(is_target);
      } else {
        is_target_ = is_target;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA || target.device().type() == c10::DeviceType::XLA || is_target.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target) || at::functionalization::impl::isFunctionalTensor(is_target))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::multilabel_margin_loss_backward_grad_input::call(grad_output_, self_, target_, reduction, is_target_, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::multilabel_margin_loss_backward::call(grad_output_, self_, target_, reduction, is_target_);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }
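    // When the mutable output is *not* a functional tensor, each kernel takes one of two
    // paths. If any read-only input is functional (and no argument lives on an XLA device,
    // where cross-device writes like cpu_tensor.copy_(xla_tensor) are legitimate), the
    // kernel raises the TORCH_INTERNAL_ASSERT above, since mutating a non-functional tensor
    // from functional inputs cannot be tracked by the functionalization pass. Otherwise
    // nothing involved needs to be functionalized, so the kernel simply redispatches the
    // original out= op with functionalization skipped and returns the caller's output
    // tensor with its identity unchanged.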

    at::Tensor & nll_loss_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto target_meta = to_meta(target);
        auto weight_meta = to_meta(weight);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::nll_loss_out::call(self_meta, target_meta, weight_meta, reduction, ignore_index, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor target_;
      if (at::functionalization::impl::isFunctionalTensor(target)) {
        at::functionalization::impl::sync(target);
        target_ = at::functionalization::impl::from_functional_tensor(target);
      } else {
        target_ = target;
      }
      
      ::std::optional<at::Tensor> weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || target.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target) || at::functionalization::impl::isFunctionalTensor(weight))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::nll_loss_out::call(self_, target_, weight_, reduction, ignore_index, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::nll_loss::call(self_, target_, weight_, reduction, ignore_index);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
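    // The nll_loss family carries an optional `weight` argument, so it is unwrapped through
    // a ::std::optional<at::Tensor>: isFunctionalTensor / sync / from_functional_tensor have
    // overloads for optional tensors, and an absent weight simply passes through as nullopt.
    // Note that the XLA device check above skips `weight` (there may be no tensor to query a
    // device from), while the "any input is functional" check still includes it.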

    ::std::tuple<at::Tensor &,at::Tensor &> nll_loss_forward_out_output(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & output, at::Tensor & total_weight) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto target_meta = to_meta(target);
        auto weight_meta = to_meta(weight);
        auto output_meta = to_meta(output);
        auto total_weight_meta = to_meta(total_weight);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::nll_loss_forward_output::call(self_meta, target_meta, weight_meta, reduction, ignore_index, output_meta, total_weight_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor target_;
      if (at::functionalization::impl::isFunctionalTensor(target)) {
        at::functionalization::impl::sync(target);
        target_ = at::functionalization::impl::from_functional_tensor(target);
      } else {
        target_ = target;
      }
      
      ::std::optional<at::Tensor> weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      at::Tensor output_;
      if (at::functionalization::impl::isFunctionalTensor(output)) {
        at::functionalization::impl::sync(output);
        output_ = at::functionalization::impl::from_functional_tensor(output);
      } else {
        output_ = output;
      }
      
      at::Tensor total_weight_;
      if (at::functionalization::impl::isFunctionalTensor(total_weight)) {
        at::functionalization::impl::sync(total_weight);
        total_weight_ = at::functionalization::impl::from_functional_tensor(total_weight);
      } else {
        total_weight_ = total_weight;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(output) && at::functionalization::impl::isFunctionalTensor(total_weight))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || target.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target) || at::functionalization::impl::isFunctionalTensor(weight))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::nll_loss_forward_output::call(self_, target_, weight_, reduction, ignore_index, output_, total_weight_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(output, total_weight);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::nll_loss_forward::call(self_, target_, weight_, reduction, ignore_index);
        }
        auto output_inner = at::functionalization::impl::from_functional_tensor(output);
        at::functionalization::impl::replace_(output, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(output);
        at::functionalization::impl::sync(output);
        auto output_inner_updated = at::functionalization::impl::from_functional_tensor(output);
        at::functionalization::impl::propagate_xla_data_direct(output_inner, output_inner_updated);
        auto total_weight_inner = at::functionalization::impl::from_functional_tensor(total_weight);
        at::functionalization::impl::replace_(total_weight, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(total_weight);
        at::functionalization::impl::sync(total_weight);
        auto total_weight_inner_updated = at::functionalization::impl::from_functional_tensor(total_weight);
        at::functionalization::impl::propagate_xla_data_direct(total_weight_inner, total_weight_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(output, total_weight);
      }
    }
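    // After commit_update()/sync(), each kernel re-reads the inner tensor and calls
    // propagate_xla_data_direct(old_inner, new_inner). The intent is to let backends that
    // attach extra metadata to the inner tensor (XLA in particular) carry that data over
    // from the value that existed before the update to the freshly computed one; for other
    // backends the call is effectively a no-op.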

    at::Tensor & nll_loss_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto target_meta = to_meta(target);
        auto weight_meta = to_meta(weight);
        auto total_weight_meta = to_meta(total_weight);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::nll_loss_backward_grad_input::call(grad_output_meta, self_meta, target_meta, weight_meta, reduction, ignore_index, total_weight_meta, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor target_;
      if (at::functionalization::impl::isFunctionalTensor(target)) {
        at::functionalization::impl::sync(target);
        target_ = at::functionalization::impl::from_functional_tensor(target);
      } else {
        target_ = target;
      }
      
      ::std::optional<at::Tensor> weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      at::Tensor total_weight_;
      if (at::functionalization::impl::isFunctionalTensor(total_weight)) {
        at::functionalization::impl::sync(total_weight);
        total_weight_ = at::functionalization::impl::from_functional_tensor(total_weight);
      } else {
        total_weight_ = total_weight;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA || target.device().type() == c10::DeviceType::XLA || total_weight.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(total_weight))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::nll_loss_backward_grad_input::call(grad_output_, self_, target_, weight_, reduction, ignore_index, total_weight_, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::nll_loss_backward::call(grad_output_, self_, target_, weight_, reduction, ignore_index, total_weight_);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }
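    // Every kernel here accepts a leading c10::DispatchKeySet. That parameter is part of
    // the registration convention for kernels that participate in dispatch-key plumbing;
    // the bodies above do not forward it, because the nested at::_ops::*::call()
    // invocations go back through the dispatcher on their own, with functionalization
    // excluded via AutoDispatchSkipFunctionalize.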

    at::Tensor & nll_loss2d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto target_meta = to_meta(target);
        auto weight_meta = to_meta(weight);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::nll_loss2d_out::call(self_meta, target_meta, weight_meta, reduction, ignore_index, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor target_;
      if (at::functionalization::impl::isFunctionalTensor(target)) {
        at::functionalization::impl::sync(target);
        target_ = at::functionalization::impl::from_functional_tensor(target);
      } else {
        target_ = target;
      }
      
      ::std::optional<at::Tensor> weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || target.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target) || at::functionalization::impl::isFunctionalTensor(weight))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::nll_loss2d_out::call(self_, target_, weight_, reduction, ignore_index, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::nll_loss2d::call(self_, target_, weight_, reduction, ignore_index);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
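    // The sync() call issued on each functional input before unwrapping is what makes the
    // unwrapped value safe to use: if the input has pending updates queued through other
    // views or aliases, sync() applies them first, so from_functional_tensor() hands the
    // redispatched op an up-to-date tensor.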

    ::std::tuple<at::Tensor &,at::Tensor &> nll_loss2d_forward_out_output(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & output, at::Tensor & total_weight) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto target_meta = to_meta(target);
        auto weight_meta = to_meta(weight);
        auto output_meta = to_meta(output);
        auto total_weight_meta = to_meta(total_weight);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::nll_loss2d_forward_output::call(self_meta, target_meta, weight_meta, reduction, ignore_index, output_meta, total_weight_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor target_;
      if (at::functionalization::impl::isFunctionalTensor(target)) {
        at::functionalization::impl::sync(target);
        target_ = at::functionalization::impl::from_functional_tensor(target);
      } else {
        target_ = target;
      }
      
      ::std::optional<at::Tensor> weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      at::Tensor output_;
      if (at::functionalization::impl::isFunctionalTensor(output)) {
        at::functionalization::impl::sync(output);
        output_ = at::functionalization::impl::from_functional_tensor(output);
      } else {
        output_ = output;
      }
      
      at::Tensor total_weight_;
      if (at::functionalization::impl::isFunctionalTensor(total_weight)) {
        at::functionalization::impl::sync(total_weight);
        total_weight_ = at::functionalization::impl::from_functional_tensor(total_weight);
      } else {
        total_weight_ = total_weight;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(output) && at::functionalization::impl::isFunctionalTensor(total_weight))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || target.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target) || at::functionalization::impl::isFunctionalTensor(weight))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::nll_loss2d_forward_output::call(self_, target_, weight_, reduction, ignore_index, output_, total_weight_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(output, total_weight);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::nll_loss2d_forward::call(self_, target_, weight_, reduction, ignore_index);
        }
        auto output_inner = at::functionalization::impl::from_functional_tensor(output);
        at::functionalization::impl::replace_(output, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(output);
        at::functionalization::impl::sync(output);
        auto output_inner_updated = at::functionalization::impl::from_functional_tensor(output);
        at::functionalization::impl::propagate_xla_data_direct(output_inner, output_inner_updated);
        auto total_weight_inner = at::functionalization::impl::from_functional_tensor(total_weight);
        at::functionalization::impl::replace_(total_weight, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(total_weight);
        at::functionalization::impl::sync(total_weight);
        auto total_weight_inner_updated = at::functionalization::impl::from_functional_tensor(total_weight);
        at::functionalization::impl::propagate_xla_data_direct(total_weight_inner, total_weight_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(output, total_weight);
      }
    }

    at::Tensor & nll_loss2d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto target_meta = to_meta(target);
        auto weight_meta = to_meta(weight);
        auto total_weight_meta = to_meta(total_weight);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::nll_loss2d_backward_grad_input::call(grad_output_meta, self_meta, target_meta, weight_meta, reduction, ignore_index, total_weight_meta, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor target_;
      if (at::functionalization::impl::isFunctionalTensor(target)) {
        at::functionalization::impl::sync(target);
        target_ = at::functionalization::impl::from_functional_tensor(target);
      } else {
        target_ = target;
      }
      
      ::std::optional<at::Tensor> weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      at::Tensor total_weight_;
      if (at::functionalization::impl::isFunctionalTensor(total_weight)) {
        at::functionalization::impl::sync(total_weight);
        total_weight_ = at::functionalization::impl::from_functional_tensor(total_weight);
      } else {
        total_weight_ = total_weight;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA || target.device().type() == c10::DeviceType::XLA || total_weight.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(total_weight))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::nll_loss2d_backward_grad_input::call(grad_output_, self_, target_, weight_, reduction, ignore_index, total_weight_, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::nll_loss2d_backward::call(grad_output_, self_, target_, weight_, reduction, ignore_index, total_weight_);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }
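    // In the redispatch branch (case 2), the value returned by the out= call is bound to a
    // local tmp_output and then deliberately ignored: out= ops return a reference to the
    // tensor they just wrote into, so `grad_input_` (and the caller's `grad_input`, which
    // aliases it in this branch) already holds the result, and returning the caller's
    // argument preserves the expected reference semantics.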

    at::Tensor & smooth_l1_loss_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto target_meta = to_meta(target);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::smooth_l1_loss_out::call(self_meta, target_meta, reduction, beta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor target_;
      if (at::functionalization::impl::isFunctionalTensor(target)) {
        at::functionalization::impl::sync(target);
        target_ = at::functionalization::impl::from_functional_tensor(target);
      } else {
        target_ = target;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || target.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::smooth_l1_loss_out::call(self_, target_, reduction, beta, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::smooth_l1_loss::call(self_, target_, reduction, beta);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & smooth_l1_loss_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto target_meta = to_meta(target);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::smooth_l1_loss_backward_grad_input::call(grad_output_meta, self_meta, target_meta, reduction, beta, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor target_;
      if (at::functionalization::impl::isFunctionalTensor(target)) {
        at::functionalization::impl::sync(target);
        target_ = at::functionalization::impl::from_functional_tensor(target);
      } else {
        target_ = target;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA || target.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::smooth_l1_loss_backward_grad_input::call(grad_output_, self_, target_, reduction, beta, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::smooth_l1_loss_backward::call(grad_output_, self_, target_, reduction, beta);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }

    at::Tensor & huber_loss_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto target_meta = to_meta(target);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::huber_loss_out::call(self_meta, target_meta, reduction, delta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor target_;
      if (at::functionalization::impl::isFunctionalTensor(target)) {
        at::functionalization::impl::sync(target);
        target_ = at::functionalization::impl::from_functional_tensor(target);
      } else {
        target_ = target;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || target.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::huber_loss_out::call(self_, target_, reduction, delta, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::huber_loss::call(self_, target_, reduction, delta);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & huber_loss_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto target_meta = to_meta(target);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::huber_loss_backward_out::call(grad_output_meta, self_meta, target_meta, reduction, delta, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor target_;
      if (at::functionalization::impl::isFunctionalTensor(target)) {
        at::functionalization::impl::sync(target);
        target_ = at::functionalization::impl::from_functional_tensor(target);
      } else {
        target_ = target;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA || target.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::huber_loss_backward_out::call(grad_output_, self_, target_, reduction, delta, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::huber_loss_backward::call(grad_output_, self_, target_, reduction, delta);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }
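    // Note: the wrapper above follows the standard functionalization recipe for out= ops:
    // sync and unwrap every argument, call the *functional* variant of the op under a guard
    // that skips this kernel, then write the result back into the functional output wrapper.
    // A minimal sketch of the happy path (all mutable arguments are functional tensors),
    // using only the impl:: helpers already called above; `some_functional_op` stands in for
    // the functional variant and is a placeholder, not a real ATen symbol:
    //
    //   at::functionalization::impl::sync(out);                                    // flush pending updates
    //   at::Tensor out_ = at::functionalization::impl::from_functional_tensor(out);
    //   at::Tensor tmp;
    //   {
    //     at::AutoDispatchSkipFunctionalize guard;                                 // redispatch below Functionalize
    //     tmp = some_functional_op(/* unwrapped inputs */);                        // pure, no mutation
    //   }
    //   at::functionalization::impl::replace_(out, tmp);                           // point the wrapper at the new value
    //   at::functionalization::impl::commit_update(out);                           // record the mutation for later replay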

    at::Tensor & soft_margin_loss_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto target_meta = to_meta(target);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::soft_margin_loss_out::call(self_meta, target_meta, reduction, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor target_;
      if (at::functionalization::impl::isFunctionalTensor(target)) {
        at::functionalization::impl::sync(target);
        target_ = at::functionalization::impl::from_functional_tensor(target);
      } else {
        target_ = target;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || target.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::soft_margin_loss_out::call(self_, target_, reduction, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::soft_margin_loss::call(self_, target_, reduction);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
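    // Note on the "case 1" / "case 2" branch above: if the mutable output is *not* a functional
    // tensor but one of the read-only inputs is, the mutation cannot be represented and the
    // kernel asserts (case 1), except when an XLA tensor is involved (cpu_tensor.copy_(xla_tensor)
    // is legitimate). If nothing is functional at all, the kernel just redispatches the original
    // out= op (case 2). A hedged illustration of the error case, assuming to_functional_tensor()
    // from <ATen/FunctionalTensorWrapper.h>:
    //
    //   at::Tensor self   = at::functionalization::impl::to_functional_tensor(at::randn({4}));
    //   at::Tensor target = at::functionalization::impl::to_functional_tensor(at::randn({4}));
    //   at::Tensor out    = at::empty({4});   // plain, non-functional output
    //   // calling the soft_margin_loss out= overload here would take case 1 and trip the assert above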

    at::Tensor & soft_margin_loss_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto target_meta = to_meta(target);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::soft_margin_loss_backward_grad_input::call(grad_output_meta, self_meta, target_meta, reduction, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor target_;
      if (at::functionalization::impl::isFunctionalTensor(target)) {
        at::functionalization::impl::sync(target);
        target_ = at::functionalization::impl::from_functional_tensor(target);
      } else {
        target_ = target;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA || target.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::soft_margin_loss_backward_grad_input::call(grad_output_, self_, target_, reduction, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::soft_margin_loss_backward::call(grad_output_, self_, target_, reduction);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }

    at::Tensor & elu_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::elu_out::call(self_meta, alpha, scale, input_scale, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::elu_out::call(self_, alpha, scale, input_scale, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::elu::call(self_, alpha, scale, input_scale);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & elu_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::elu_::call(self_meta, alpha, scale, input_scale);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::elu_::call(self_, alpha, scale, input_scale);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::elu::call(self_, alpha, scale, input_scale);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
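    // Note: unlike the out= wrappers above, whose meta-reference block is compiled out via
    // `if (false && ...)`, in-place ops such as elu_ keep it enabled (`if (true && ...)`) and
    // first replay the call on meta tensors. That dry run reproduces the shape/dtype checks the
    // original mutable op performs, which the functional variant used below might not. A minimal
    // sketch of the dry run, reusing the to_meta() helper and guards from the block above:
    //
    //   auto self_meta = to_meta(self);
    //   at::AutoDispatchSkipFunctionalize func_guard;
    //   c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
    //   at::_ops::elu_::call(self_meta, alpha, scale, input_scale);   // shape/dtype validation only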

    at::Tensor & elu_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto self_or_result_meta = to_meta(self_or_result);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::elu_backward_grad_input::call(grad_output_meta, alpha, scale, input_scale, is_result, self_or_result_meta, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor self_or_result_;
      if (at::functionalization::impl::isFunctionalTensor(self_or_result)) {
        at::functionalization::impl::sync(self_or_result);
        self_or_result_ = at::functionalization::impl::from_functional_tensor(self_or_result);
      } else {
        self_or_result_ = self_or_result;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || self_or_result.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self_or_result))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::elu_backward_grad_input::call(grad_output_, alpha, scale, input_scale, is_result, self_or_result_, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::elu_backward::call(grad_output_, alpha, scale, input_scale, is_result, self_or_result_);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }
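    // Note on propagate_xla_data_direct: the unwrapped ("inner") tensor is captured both before
    // replace_() and after sync(), and backend-specific bookkeeping is forwarded from the old
    // inner tensor to the new one so that backends such as XLA can keep tracking the value across
    // the functionalized mutation; for other backends the call is effectively a no-op.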

    at::Tensor & glu_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::glu_out::call(self_meta, dim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::glu_out::call(self_, dim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::glu::call(self_, dim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & glu_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::glu_backward_grad_input::call(grad_output_meta, self_meta, dim, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::glu_backward_grad_input::call(grad_output_, self_, dim, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::glu_backward::call(grad_output_, self_, dim);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }

    at::Tensor & glu_jvp_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto glu_meta = to_meta(glu);
        auto x_meta = to_meta(x);
        auto dx_meta = to_meta(dx);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::glu_jvp_out::call(glu_meta, x_meta, dx_meta, dim, out_meta);
      }
      
      at::Tensor glu_;
      if (at::functionalization::impl::isFunctionalTensor(glu)) {
        at::functionalization::impl::sync(glu);
        glu_ = at::functionalization::impl::from_functional_tensor(glu);
      } else {
        glu_ = glu;
      }
      
      at::Tensor x_;
      if (at::functionalization::impl::isFunctionalTensor(x)) {
        at::functionalization::impl::sync(x);
        x_ = at::functionalization::impl::from_functional_tensor(x);
      } else {
        x_ = x;
      }
      
      at::Tensor dx_;
      if (at::functionalization::impl::isFunctionalTensor(dx)) {
        at::functionalization::impl::sync(dx);
        dx_ = at::functionalization::impl::from_functional_tensor(dx);
      } else {
        dx_ = dx;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || glu.device().type() == c10::DeviceType::XLA || x.device().type() == c10::DeviceType::XLA || dx.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(glu) || at::functionalization::impl::isFunctionalTensor(x) || at::functionalization::impl::isFunctionalTensor(dx))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::glu_jvp_out::call(glu_, x_, dx_, dim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::glu_jvp::call(glu_, x_, dx_, dim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
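    // Note: every redispatch in these wrappers happens under at::AutoDispatchSkipFunctionalize,
    // which masks the Functionalize dispatch key for the current scope. Without it, calling
    // at::_ops::glu_jvp::call(...) from inside this kernel would re-enter functionalization
    // instead of reaching the backend kernel underneath it.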

    at::Tensor & glu_backward_jvp_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_x_meta = to_meta(grad_x);
        auto grad_glu_meta = to_meta(grad_glu);
        auto x_meta = to_meta(x);
        auto dgrad_glu_meta = to_meta(dgrad_glu);
        auto dx_meta = to_meta(dx);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::glu_backward_jvp_out::call(grad_x_meta, grad_glu_meta, x_meta, dgrad_glu_meta, dx_meta, dim, out_meta);
      }
      
      at::Tensor grad_x_;
      if (at::functionalization::impl::isFunctionalTensor(grad_x)) {
        at::functionalization::impl::sync(grad_x);
        grad_x_ = at::functionalization::impl::from_functional_tensor(grad_x);
      } else {
        grad_x_ = grad_x;
      }
      
      at::Tensor grad_glu_;
      if (at::functionalization::impl::isFunctionalTensor(grad_glu)) {
        at::functionalization::impl::sync(grad_glu);
        grad_glu_ = at::functionalization::impl::from_functional_tensor(grad_glu);
      } else {
        grad_glu_ = grad_glu;
      }
      
      at::Tensor x_;
      if (at::functionalization::impl::isFunctionalTensor(x)) {
        at::functionalization::impl::sync(x);
        x_ = at::functionalization::impl::from_functional_tensor(x);
      } else {
        x_ = x;
      }
      
      at::Tensor dgrad_glu_;
      if (at::functionalization::impl::isFunctionalTensor(dgrad_glu)) {
        at::functionalization::impl::sync(dgrad_glu);
        dgrad_glu_ = at::functionalization::impl::from_functional_tensor(dgrad_glu);
      } else {
        dgrad_glu_ = dgrad_glu;
      }
      
      at::Tensor dx_;
      if (at::functionalization::impl::isFunctionalTensor(dx)) {
        at::functionalization::impl::sync(dx);
        dx_ = at::functionalization::impl::from_functional_tensor(dx);
      } else {
        dx_ = dx;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_x.device().type() == c10::DeviceType::XLA || grad_glu.device().type() == c10::DeviceType::XLA || x.device().type() == c10::DeviceType::XLA || dgrad_glu.device().type() == c10::DeviceType::XLA || dx.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_x) || at::functionalization::impl::isFunctionalTensor(grad_glu) || at::functionalization::impl::isFunctionalTensor(x) || at::functionalization::impl::isFunctionalTensor(dgrad_glu) || at::functionalization::impl::isFunctionalTensor(dx))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::glu_backward_jvp_out::call(grad_x_, grad_glu_, x_, dgrad_glu_, dx_, dim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::glu_backward_jvp::call(grad_x_, grad_glu_, x_, dgrad_glu_, dx_, dim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & hardsigmoid_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::hardsigmoid_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::hardsigmoid_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::hardsigmoid::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & hardsigmoid_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::hardsigmoid_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::hardsigmoid_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::hardsigmoid::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & hardsigmoid_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::hardsigmoid_backward_grad_input::call(grad_output_meta, self_meta, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::hardsigmoid_backward_grad_input::call(grad_output_, self_, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::hardsigmoid_backward::call(grad_output_, self_);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }

    at::Tensor & hardtanh_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::hardtanh_out::call(self_meta, min_val, max_val, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::hardtanh_out::call(self_, min_val, max_val, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::hardtanh::call(self_, min_val, max_val);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & hardtanh_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::hardtanh_::call(self_meta, min_val, max_val);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::hardtanh_::call(self_, min_val, max_val);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::hardtanh::call(self_, min_val, max_val);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & hardtanh_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::hardtanh_backward_grad_input::call(grad_output_meta, self_meta, min_val, max_val, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::hardtanh_backward_grad_input::call(grad_output_, self_, min_val, max_val, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::hardtanh_backward::call(grad_output_, self_, min_val, max_val);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }

    at::Tensor & hardswish_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::hardswish_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::hardswish_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::hardswish::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & hardswish_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::hardswish_::call(self_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::hardswish_::call(self_);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::hardswish::call(self_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }
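    // In-place wrappers such as hardswish_ above differ from the out= ones in two ways: the
    // meta-tensor reference run is enabled (`true &&`), so shape errors surface before the op is
    // functionalized, and the mutation of `self` is re-expressed as a call to the functional op
    // whose result is then committed back into the `self` wrapper.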

    at::Tensor & hardswish_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::hardswish_backward_out::call(grad_output_meta, self_meta, out_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::hardswish_backward_out::call(grad_output_, self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::hardswish_backward::call(grad_output_, self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & leaky_relu_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & negative_slope, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::leaky_relu_out::call(self_meta, negative_slope, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::leaky_relu_out::call(self_, negative_slope, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::leaky_relu::call(self_, negative_slope);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & leaky_relu_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & negative_slope) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::leaky_relu_::call(self_meta, negative_slope);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::leaky_relu_::call(self_, negative_slope);
         return self;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::leaky_relu::call(self_, negative_slope);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, tmp_output);
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        return self;
      }
    }

    at::Tensor & leaky_relu_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::leaky_relu_backward_grad_input::call(grad_output_meta, self_meta, negative_slope, self_is_result, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::leaky_relu_backward_grad_input::call(grad_output_, self_, negative_slope, self_is_result, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::leaky_relu_backward::call(grad_output_, self_, negative_slope, self_is_result);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }

    at::Tensor & log_sigmoid_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::log_sigmoid_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::log_sigmoid_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::log_sigmoid::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> log_sigmoid_forward_out_output(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & output, at::Tensor & buffer) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto output_meta = to_meta(output);
        auto buffer_meta = to_meta(buffer);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::log_sigmoid_forward_output::call(self_meta, output_meta, buffer_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor output_;
      if (at::functionalization::impl::isFunctionalTensor(output)) {
        at::functionalization::impl::sync(output);
        output_ = at::functionalization::impl::from_functional_tensor(output);
      } else {
        output_ = output;
      }
      
      at::Tensor buffer_;
      if (at::functionalization::impl::isFunctionalTensor(buffer)) {
        at::functionalization::impl::sync(buffer);
        buffer_ = at::functionalization::impl::from_functional_tensor(buffer);
      } else {
        buffer_ = buffer;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(output) && at::functionalization::impl::isFunctionalTensor(buffer))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::log_sigmoid_forward_output::call(self_, output_, buffer_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(output, buffer);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::log_sigmoid_forward::call(self_);
        }
        auto output_inner = at::functionalization::impl::from_functional_tensor(output);
        at::functionalization::impl::replace_(output, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(output);
        at::functionalization::impl::sync(output);
        auto output_inner_updated = at::functionalization::impl::from_functional_tensor(output);
        at::functionalization::impl::propagate_xla_data_direct(output_inner, output_inner_updated);
        auto buffer_inner = at::functionalization::impl::from_functional_tensor(buffer);
        at::functionalization::impl::replace_(buffer, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(buffer);
        at::functionalization::impl::sync(buffer);
        auto buffer_inner_updated = at::functionalization::impl::from_functional_tensor(buffer);
        at::functionalization::impl::propagate_xla_data_direct(buffer_inner, buffer_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(output, buffer);
      }
    }
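    // Multi-output out= wrappers like log_sigmoid_forward_out_output above follow the same scheme,
    // except that the functional variant returns a tuple and each element is committed into its
    // corresponding out argument (here `output` and `buffer`) before the references are returned.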

    at::Tensor & log_sigmoid_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto buffer_meta = to_meta(buffer);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::log_sigmoid_backward_grad_input::call(grad_output_meta, self_meta, buffer_meta, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor buffer_;
      if (at::functionalization::impl::isFunctionalTensor(buffer)) {
        at::functionalization::impl::sync(buffer);
        buffer_ = at::functionalization::impl::from_functional_tensor(buffer);
      } else {
        buffer_ = buffer;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA || buffer.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(buffer))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::log_sigmoid_backward_grad_input::call(grad_output_, self_, buffer_, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::log_sigmoid_backward::call(grad_output_, self_, buffer_);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }

    at::Tensor & rrelu_with_noise_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, ::std::optional<at::Generator> generator, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto noise_meta = to_meta(noise);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::rrelu_with_noise_out::call(self_meta, noise_meta, lower, upper, training, generator, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor noise_;
      if (at::functionalization::impl::isFunctionalTensor(noise)) {
        at::functionalization::impl::sync(noise);
        noise_ = at::functionalization::impl::from_functional_tensor(noise);
      } else {
        noise_ = noise;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(noise) && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::rrelu_with_noise_out::call(self_, noise_, lower, upper, training, generator, out_);
         return out;
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::rrelu_with_noise_functional::call(self_, noise_, lower, upper, training, generator);
        }
        auto noise_inner = at::functionalization::impl::from_functional_tensor(noise);
        at::functionalization::impl::replace_(noise, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(noise);
        at::functionalization::impl::sync(noise);
        auto noise_inner_updated = at::functionalization::impl::from_functional_tensor(noise);
        at::functionalization::impl::propagate_xla_data_direct(noise_inner, noise_inner_updated);
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
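    // rrelu_with_noise.out also mutates its `noise` argument, so the functional path above cannot
    // simply call at::_ops::rrelu_with_noise. Instead it dispatches to rrelu_with_noise_functional,
    // which additionally returns the updated noise tensor, and both tuple elements are committed
    // back into `noise` and `out` respectively.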

    at::Tensor & rrelu_with_noise_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, ::std::optional<at::Generator> generator) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto noise_meta = to_meta(noise);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::rrelu_with_noise_::call(self_meta, noise_meta, lower, upper, training, generator);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor noise_;
      if (at::functionalization::impl::isFunctionalTensor(noise)) {
        at::functionalization::impl::sync(noise);
        noise_ = at::functionalization::impl::from_functional_tensor(noise);
      } else {
        noise_ = noise;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self) && at::functionalization::impl::isFunctionalTensor(noise))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::rrelu_with_noise_::call(self_, noise_, lower, upper, training, generator);
         return self;
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::rrelu_with_noise_functional::call(self_, noise_, lower, upper, training, generator);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        auto noise_inner = at::functionalization::impl::from_functional_tensor(noise);
        at::functionalization::impl::replace_(noise, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(noise);
        at::functionalization::impl::sync(noise);
        auto noise_inner_updated = at::functionalization::impl::from_functional_tensor(noise);
        at::functionalization::impl::propagate_xla_data_direct(noise_inner, noise_inner_updated);
        return self;
      }
    }

    at::Tensor rrelu_with_noise(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, ::std::optional<at::Generator> generator) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto noise_meta = to_meta(noise);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::rrelu_with_noise::call(self_meta, noise_meta, lower, upper, training, generator);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor noise_;
      if (at::functionalization::impl::isFunctionalTensor(noise)) {
        at::functionalization::impl::sync(noise);
        noise_ = at::functionalization::impl::from_functional_tensor(noise);
      } else {
        noise_ = noise;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(noise))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::rrelu_with_noise::call(self_, noise_, lower, upper, training, generator);
         return tmp_output;
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::rrelu_with_noise_functional::call(self_, noise_, lower, upper, training, generator);
        }
        auto output_0 = at::functionalization::impl::to_functional_tensor(std::get<0>(tmp_output));
        auto noise_inner = at::functionalization::impl::from_functional_tensor(noise);
        at::functionalization::impl::replace_(noise, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(noise);
        at::functionalization::impl::sync(noise);
        auto noise_inner_updated = at::functionalization::impl::from_functional_tensor(noise);
        at::functionalization::impl::propagate_xla_data_direct(noise_inner, noise_inner_updated);
        return output_0;
      }
    }
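    // Even the functional-looking rrelu_with_noise schema mutates `noise`, so its wrapper above
    // also dispatches to rrelu_with_noise_functional: the first tuple element is wrapped as a fresh
    // functional tensor (to_functional_tensor) and returned, while the second is committed back
    // into `noise`.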

    at::Tensor & rrelu_with_noise_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, bool self_is_result, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto noise_meta = to_meta(noise);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::rrelu_with_noise_backward_out::call(grad_output_meta, self_meta, noise_meta, lower, upper, training, self_is_result, out_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor noise_;
      if (at::functionalization::impl::isFunctionalTensor(noise)) {
        at::functionalization::impl::sync(noise);
        noise_ = at::functionalization::impl::from_functional_tensor(noise);
      } else {
        noise_ = noise;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA || noise.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(noise))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::rrelu_with_noise_backward_out::call(grad_output_, self_, noise_, lower, upper, training, self_is_result, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::rrelu_with_noise_backward::call(grad_output_, self_, noise_, lower, upper, training, self_is_result);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & softplus_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::softplus_out::call(self_meta, beta, threshold, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::softplus_out::call(self_, beta, threshold, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::softplus::call(self_, beta, threshold);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & softplus_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::softplus_backward_grad_input::call(grad_output_meta, self_meta, beta, threshold, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::softplus_backward_grad_input::call(grad_output_, self_, beta, threshold, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::softplus_backward::call(grad_output_, self_, beta, threshold);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }

    at::Tensor & softshrink_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::softshrink_out::call(self_meta, lambd, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::softshrink_out::call(self_, lambd, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::softshrink::call(self_, lambd);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & softshrink_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::softshrink_backward_grad_input::call(grad_output_meta, self_meta, lambd, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::softshrink_backward_grad_input::call(grad_output_, self_, lambd, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::softshrink_backward::call(grad_output_, self_, lambd);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }

    at::Tensor & adaptive_avg_pool2d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::adaptive_avg_pool2d_out::call(self_meta, output_size, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::adaptive_avg_pool2d_out::call(self_, output_size, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::adaptive_avg_pool2d::call(self_, output_size);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & mkldnn_adaptive_avg_pool2d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::mkldnn_adaptive_avg_pool2d_out::call(self_meta, output_size, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::mkldnn_adaptive_avg_pool2d_out::call(self_, output_size, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::mkldnn_adaptive_avg_pool2d::call(self_, output_size);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & mkldnn_adaptive_avg_pool2d_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::mkldnn_adaptive_avg_pool2d_backward_out::call(grad_output_meta, self_meta, out_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::mkldnn_adaptive_avg_pool2d_backward_out::call(grad_output_, self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::mkldnn_adaptive_avg_pool2d_backward::call(grad_output_, self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _adaptive_avg_pool2d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_adaptive_avg_pool2d_out::call(self_meta, output_size, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_adaptive_avg_pool2d_out::call(self_, output_size, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_adaptive_avg_pool2d::call(self_, output_size);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _adaptive_avg_pool2d_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_adaptive_avg_pool2d_backward_out::call(grad_output_meta, self_meta, out_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_adaptive_avg_pool2d_backward_out::call(grad_output_, self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_adaptive_avg_pool2d_backward::call(grad_output_, self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & adaptive_avg_pool3d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::adaptive_avg_pool3d_out::call(self_meta, output_size, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::adaptive_avg_pool3d_out::call(self_, output_size, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::adaptive_avg_pool3d::call(self_, output_size);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _adaptive_avg_pool3d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_adaptive_avg_pool3d_out::call(self_meta, output_size, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_adaptive_avg_pool3d_out::call(self_, output_size, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_adaptive_avg_pool3d::call(self_, output_size);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _adaptive_avg_pool3d_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_adaptive_avg_pool3d_backward_out::call(grad_output_meta, self_meta, out_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_adaptive_avg_pool3d_backward_out::call(grad_output_, self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_adaptive_avg_pool3d_backward::call(grad_output_, self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
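
    // Ops with multiple out= arguments (e.g. adaptive_max_pool2d.out below) apply the
    // same commit sequence to each output tensor, pulling the corresponding element out
    // of the tuple returned by the functional variant.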

    ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool2d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        auto indices_meta = to_meta(indices);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::adaptive_max_pool2d_out::call(self_meta, output_size, out_meta, indices_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      
      at::Tensor indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out) && at::functionalization::impl::isFunctionalTensor(indices))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::adaptive_max_pool2d_out::call(self_, output_size, out_, indices_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(out, indices);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::adaptive_max_pool2d::call(self_, output_size);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        auto indices_inner = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::replace_(indices, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(indices);
        at::functionalization::impl::sync(indices);
        auto indices_inner_updated = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::propagate_xla_data_direct(indices_inner, indices_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(out, indices);
      }
    }
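
    // For backward out= variants the mutated argument is grad_input rather than out,
    // so the functional result is committed into grad_input's wrapper instead.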

    at::Tensor & adaptive_max_pool2d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto indices_meta = to_meta(indices);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::adaptive_max_pool2d_backward_grad_input::call(grad_output_meta, self_meta, indices_meta, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA || indices.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(indices))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::adaptive_max_pool2d_backward_grad_input::call(grad_output_, self_, indices_, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::adaptive_max_pool2d_backward::call(grad_output_, self_, indices_);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool3d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        auto indices_meta = to_meta(indices);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::adaptive_max_pool3d_out::call(self_meta, output_size, out_meta, indices_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      
      at::Tensor indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out) && at::functionalization::impl::isFunctionalTensor(indices))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::adaptive_max_pool3d_out::call(self_, output_size, out_, indices_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(out, indices);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::adaptive_max_pool3d::call(self_, output_size);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        auto indices_inner = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::replace_(indices, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(indices);
        at::functionalization::impl::sync(indices);
        auto indices_inner_updated = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::propagate_xla_data_direct(indices_inner, indices_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(out, indices);
      }
    }

    at::Tensor & adaptive_max_pool3d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto indices_meta = to_meta(indices);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::adaptive_max_pool3d_backward_grad_input::call(grad_output_meta, self_meta, indices_meta, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA || indices.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(indices))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::adaptive_max_pool3d_backward_grad_input::call(grad_output_, self_, indices_, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::adaptive_max_pool3d_backward::call(grad_output_, self_, indices_);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }

    at::Tensor & avg_pool2d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::avg_pool2d_out::call(self_meta, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::avg_pool2d_out::call(self_, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::avg_pool2d::call(self_, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & avg_pool2d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::avg_pool2d_backward_grad_input::call(grad_output_meta, self_meta, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::avg_pool2d_backward_grad_input::call(grad_output_, self_, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::avg_pool2d_backward::call(grad_output_, self_, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }

    at::Tensor & avg_pool3d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::avg_pool3d_out::call(self_meta, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::avg_pool3d_out::call(self_, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::avg_pool3d::call(self_, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & avg_pool3d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::avg_pool3d_backward_grad_input::call(grad_output_meta, self_meta, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::avg_pool3d_backward_grad_input::call(grad_output_, self_, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::avg_pool3d_backward::call(grad_output_, self_, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> fractional_max_pool2d_out_output(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto random_samples_meta = to_meta(random_samples);
        auto output_meta = to_meta(output);
        auto indices_meta = to_meta(indices);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::fractional_max_pool2d_output::call(self_meta, kernel_size, output_size, random_samples_meta, output_meta, indices_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor random_samples_;
      if (at::functionalization::impl::isFunctionalTensor(random_samples)) {
        at::functionalization::impl::sync(random_samples);
        random_samples_ = at::functionalization::impl::from_functional_tensor(random_samples);
      } else {
        random_samples_ = random_samples;
      }
      
      at::Tensor output_;
      if (at::functionalization::impl::isFunctionalTensor(output)) {
        at::functionalization::impl::sync(output);
        output_ = at::functionalization::impl::from_functional_tensor(output);
      } else {
        output_ = output;
      }
      
      at::Tensor indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(output) && at::functionalization::impl::isFunctionalTensor(indices))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || random_samples.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(random_samples))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::fractional_max_pool2d_output::call(self_, kernel_size, output_size, random_samples_, output_, indices_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(output, indices);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::fractional_max_pool2d::call(self_, kernel_size, output_size, random_samples_);
        }
        auto output_inner = at::functionalization::impl::from_functional_tensor(output);
        at::functionalization::impl::replace_(output, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(output);
        at::functionalization::impl::sync(output);
        auto output_inner_updated = at::functionalization::impl::from_functional_tensor(output);
        at::functionalization::impl::propagate_xla_data_direct(output_inner, output_inner_updated);
        auto indices_inner = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::replace_(indices, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(indices);
        at::functionalization::impl::sync(indices);
        auto indices_inner_updated = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::propagate_xla_data_direct(indices_inner, indices_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(output, indices);
      }
    }

    at::Tensor & fractional_max_pool2d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto indices_meta = to_meta(indices);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::fractional_max_pool2d_backward_grad_input::call(grad_output_meta, self_meta, kernel_size, output_size, indices_meta, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA || indices.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(indices))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::fractional_max_pool2d_backward_grad_input::call(grad_output_, self_, kernel_size, output_size, indices_, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::fractional_max_pool2d_backward::call(grad_output_, self_, kernel_size, output_size, indices_);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }
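
    // Illustrative sketch (assumptions noted inline; not part of the generated registrations):
    // the disabled `if (false && !disable_meta_reference())` blocks above correspond to the
    // meta-tensor reference path. Running the original mutable op on meta tensors performs the
    // same shape/dtype checks without any real compute, roughly:
    //
    //   // Meta tensors carry only sizes/strides/dtype, so shape errors surface cheaply here.
    //   auto opts = at::TensorOptions().device(at::kMeta);
    //   at::Tensor grad_output_meta = at::empty({1, 1, 4, 4}, opts);
    //   at::Tensor self_meta = at::empty({1, 1, 8, 8}, opts);
    //   (void)at::add(grad_output_meta, grad_output_meta);  // dispatches to the Meta backend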

    ::std::tuple<at::Tensor &,at::Tensor &> fractional_max_pool3d_out_output(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto random_samples_meta = to_meta(random_samples);
        auto output_meta = to_meta(output);
        auto indices_meta = to_meta(indices);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::fractional_max_pool3d_output::call(self_meta, kernel_size, output_size, random_samples_meta, output_meta, indices_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor random_samples_;
      if (at::functionalization::impl::isFunctionalTensor(random_samples)) {
        at::functionalization::impl::sync(random_samples);
        random_samples_ = at::functionalization::impl::from_functional_tensor(random_samples);
      } else {
        random_samples_ = random_samples;
      }
      
      at::Tensor output_;
      if (at::functionalization::impl::isFunctionalTensor(output)) {
        at::functionalization::impl::sync(output);
        output_ = at::functionalization::impl::from_functional_tensor(output);
      } else {
        output_ = output;
      }
      
      at::Tensor indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(output) && at::functionalization::impl::isFunctionalTensor(indices))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || random_samples.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(random_samples))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::fractional_max_pool3d_output::call(self_, kernel_size, output_size, random_samples_, output_, indices_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(output, indices);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::fractional_max_pool3d::call(self_, kernel_size, output_size, random_samples_);
        }
        auto output_inner = at::functionalization::impl::from_functional_tensor(output);
        at::functionalization::impl::replace_(output, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(output);
        at::functionalization::impl::sync(output);
        auto output_inner_updated = at::functionalization::impl::from_functional_tensor(output);
        at::functionalization::impl::propagate_xla_data_direct(output_inner, output_inner_updated);
        auto indices_inner = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::replace_(indices, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(indices);
        at::functionalization::impl::sync(indices);
        auto indices_inner_updated = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::propagate_xla_data_direct(indices_inner, indices_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(output, indices);
      }
    }

    at::Tensor & fractional_max_pool3d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto indices_meta = to_meta(indices);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::fractional_max_pool3d_backward_grad_input::call(grad_output_meta, self_meta, kernel_size, output_size, indices_meta, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA || indices.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(indices))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::fractional_max_pool3d_backward_grad_input::call(grad_output_, self_, kernel_size, output_size, indices_, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::fractional_max_pool3d_backward::call(grad_output_, self_, kernel_size, output_size, indices_);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> max_pool2d_with_indices_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        auto indices_meta = to_meta(indices);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::max_pool2d_with_indices_out::call(self_meta, kernel_size, stride, padding, dilation, ceil_mode, out_meta, indices_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      
      at::Tensor indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out) && at::functionalization::impl::isFunctionalTensor(indices))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::max_pool2d_with_indices_out::call(self_, kernel_size, stride, padding, dilation, ceil_mode, out_, indices_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(out, indices);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::max_pool2d_with_indices::call(self_, kernel_size, stride, padding, dilation, ceil_mode);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        auto indices_inner = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::replace_(indices, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(indices);
        at::functionalization::impl::sync(indices);
        auto indices_inner_updated = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::propagate_xla_data_direct(indices_inner, indices_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(out, indices);
      }
    }
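
    // Illustrative sketch of the write-back sequence used above (assumes `out` is already a
    // functional tensor; not part of the generated registrations). After the functional op runs
    // under AutoDispatchSkipFunctionalize, its result is committed into the mutable argument:
    //
    //   at::Tensor fresh = /* result of the functional op */ at::Tensor();
    //   at::functionalization::impl::replace_(out, fresh);   // point the wrapper at the new value
    //   at::functionalization::impl::commit_update(out);     // record the mutation for later replay
    //   at::functionalization::impl::sync(out);              // apply any pending view updates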

    at::Tensor & max_pool2d_with_indices_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto indices_meta = to_meta(indices);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::max_pool2d_with_indices_backward_grad_input::call(grad_output_meta, self_meta, kernel_size, stride, padding, dilation, ceil_mode, indices_meta, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA || indices.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(indices))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::max_pool2d_with_indices_backward_grad_input::call(grad_output_, self_, kernel_size, stride, padding, dilation, ceil_mode, indices_, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::max_pool2d_with_indices_backward::call(grad_output_, self_, kernel_size, stride, padding, dilation, ceil_mode, indices_);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }
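
    // Illustrative note (not part of the generated registrations): the XLA carve-out in the
    // checks above exists because a cross-device copy into a non-functional CPU tensor is legal.
    // Assuming a torch_xla build that provides `xla_tensor` on an XLA device (hypothetical here):
    //
    //   at::Tensor cpu_tensor = at::empty({2, 2});
    //   // Valid code, so the "mutating a non-functional tensor" assert must not fire for it.
    //   cpu_tensor.copy_(xla_tensor);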

    ::std::tuple<at::Tensor &,at::Tensor &> max_pool3d_with_indices_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        auto indices_meta = to_meta(indices);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::max_pool3d_with_indices_out::call(self_meta, kernel_size, stride, padding, dilation, ceil_mode, out_meta, indices_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      
      at::Tensor indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out) && at::functionalization::impl::isFunctionalTensor(indices))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::max_pool3d_with_indices_out::call(self_, kernel_size, stride, padding, dilation, ceil_mode, out_, indices_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(out, indices);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::max_pool3d_with_indices::call(self_, kernel_size, stride, padding, dilation, ceil_mode);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        auto indices_inner = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::replace_(indices, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(indices);
        at::functionalization::impl::sync(indices);
        auto indices_inner_updated = at::functionalization::impl::from_functional_tensor(indices);
        at::functionalization::impl::propagate_xla_data_direct(indices_inner, indices_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(out, indices);
      }
    }

    at::Tensor & max_pool3d_with_indices_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto indices_meta = to_meta(indices);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::max_pool3d_with_indices_backward_grad_input::call(grad_output_meta, self_meta, kernel_size, stride, padding, dilation, ceil_mode, indices_meta, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA || indices.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(indices))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::max_pool3d_with_indices_backward_grad_input::call(grad_output_, self_, kernel_size, stride, padding, dilation, ceil_mode, indices_, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::max_pool3d_with_indices_backward::call(grad_output_, self_, kernel_size, stride, padding, dilation, ceil_mode, indices_);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }

    at::Tensor & max_unpool2d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto indices_meta = to_meta(indices);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::max_unpool2d_out::call(self_meta, indices_meta, output_size, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || indices.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(indices))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::max_unpool2d_out::call(self_, indices_, output_size, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::max_unpool2d::call(self_, indices_, output_size);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
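
    // Illustrative sketch (not part of the generated registrations): when none of the mutable
    // arguments are functional tensors, the kernels above simply no-op and re-dispatch to the
    // original out= op below the Functionalize key. The guard that makes this safe is used like:
    //
    //   {
    //     at::AutoDispatchSkipFunctionalize guard;              // skip the Functionalize key
    //     // given `self`, `indices`, and `out` from a prior max_pool2d_with_indices call:
    //     at::max_unpool2d_out(out, self, indices, {8, 8});     // hits the regular backend kernel
    //   }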

    at::Tensor & max_unpool3d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto indices_meta = to_meta(indices);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::max_unpool3d_out::call(self_meta, indices_meta, output_size, stride, padding, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || indices.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(indices))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::max_unpool3d_out::call(self_, indices_, output_size, stride, padding, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::max_unpool3d::call(self_, indices_, output_size, stride, padding);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & reflection_pad1d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::reflection_pad1d_out::call(self_meta, padding, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::reflection_pad1d_out::call(self_, padding, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::reflection_pad1d::call(self_, padding);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
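
    // Illustrative sketch (not part of the generated registrations): the unwrap helpers used
    // throughout this file come from ATen/FunctionalTensorWrapper.h. A plain tensor can be
    // wrapped before running a program under functionalization and unwrapped afterwards:
    //
    //   at::Tensor t = at::randn({2, 3});
    //   at::Tensor wrapped = at::functionalization::impl::to_functional_tensor(t);
    //   TORCH_INTERNAL_ASSERT(at::functionalization::impl::isFunctionalTensor(wrapped));
    //   at::Tensor unwrapped = at::functionalization::impl::from_functional_tensor(wrapped);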

    at::Tensor & reflection_pad1d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::reflection_pad1d_backward_grad_input::call(grad_output_meta, self_meta, padding, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::reflection_pad1d_backward_grad_input::call(grad_output_, self_, padding, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::reflection_pad1d_backward::call(grad_output_, self_, padding);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }

    at::Tensor & reflection_pad2d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::reflection_pad2d_out::call(self_meta, padding, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::reflection_pad2d_out::call(self_, padding, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::reflection_pad2d::call(self_, padding);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & reflection_pad2d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::reflection_pad2d_backward_grad_input::call(grad_output_meta, self_meta, padding, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::reflection_pad2d_backward_grad_input::call(grad_output_, self_, padding, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::reflection_pad2d_backward::call(grad_output_, self_, padding);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }

    at::Tensor & reflection_pad3d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::reflection_pad3d_out::call(self_meta, padding, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::reflection_pad3d_out::call(self_, padding, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::reflection_pad3d::call(self_, padding);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & reflection_pad3d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::reflection_pad3d_backward_grad_input::call(grad_output_meta, self_meta, padding, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::reflection_pad3d_backward_grad_input::call(grad_output_, self_, padding, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::reflection_pad3d_backward::call(grad_output_, self_, padding);
        }
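          // Write-back path (a sketch of the semantics, assuming the usual
          // FunctionalTensorWrapper behavior): replace_() swaps the value held by
          // the functional wrapper for the freshly computed tmp_output,
          // commit_update() records the mutation against the wrapper's storage so
          // aliases can be regenerated, sync() applies any pending updates, and
          // propagate_xla_data_direct() carries backend-specific (XLA) metadata
          // from the previous inner tensor over to the updated one.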
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }

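    // The replication_pad{1,2,3}d kernels below are structurally identical to the
    // reflection_pad ones above; only the redispatched operator changes.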
    at::Tensor & replication_pad1d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::replication_pad1d_out::call(self_meta, padding, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::replication_pad1d_out::call(self_, padding, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::replication_pad1d::call(self_, padding);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & replication_pad1d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::replication_pad1d_backward_grad_input::call(grad_output_meta, self_meta, padding, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::replication_pad1d_backward_grad_input::call(grad_output_, self_, padding, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::replication_pad1d_backward::call(grad_output_, self_, padding);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }

    at::Tensor & replication_pad2d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::replication_pad2d_out::call(self_meta, padding, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::replication_pad2d_out::call(self_, padding, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::replication_pad2d::call(self_, padding);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & replication_pad2d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::replication_pad2d_backward_grad_input::call(grad_output_meta, self_meta, padding, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::replication_pad2d_backward_grad_input::call(grad_output_, self_, padding, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::replication_pad2d_backward::call(grad_output_, self_, padding);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }

    at::Tensor & replication_pad3d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::replication_pad3d_out::call(self_meta, padding, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::replication_pad3d_out::call(self_, padding, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::replication_pad3d::call(self_, padding);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & replication_pad3d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::replication_pad3d_backward_grad_input::call(grad_output_meta, self_meta, padding, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::replication_pad3d_backward_grad_input::call(grad_output_, self_, padding, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::replication_pad3d_backward::call(grad_output_, self_, padding);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }

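    // The upsample `.vec` overloads below take an optional symbolic output_size
    // together with optional per-dimension scale_factors (callers are expected to
    // provide exactly one of the two); otherwise they follow the same pattern.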
    at::Tensor & upsample_bilinear2d_out_vec_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, ::std::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::upsample_bilinear2d_vec_out::call(input_meta, output_size, align_corners, scale_factors, out_meta);
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::upsample_bilinear2d_vec_out::call(input_, output_size, align_corners, scale_factors, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::upsample_bilinear2d_vec::call(input_, output_size, align_corners, scale_factors);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & upsample_nearest2d_out_vec_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, ::std::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::upsample_nearest2d_vec_out::call(input_meta, output_size, scale_factors, out_meta);
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::upsample_nearest2d_vec_out::call(input_, output_size, scale_factors, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::upsample_nearest2d_vec::call(input_, output_size, scale_factors);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & upsample_linear1d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::upsample_linear1d_out::call(self_meta, output_size, align_corners, scales, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::upsample_linear1d_out::call(self_, output_size, align_corners, scales, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::upsample_linear1d::call(self_, output_size, align_corners, scales);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & upsample_linear1d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::upsample_linear1d_backward_grad_input::call(grad_output_meta, output_size, input_size, align_corners, scales, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::upsample_linear1d_backward_grad_input::call(grad_output_, output_size, input_size, align_corners, scales, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::upsample_linear1d_backward::call(grad_output_, output_size, input_size, align_corners, scales);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }

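    // The remaining kernels cover the explicit-size upsample overloads
    // (output_size plus optional scales_h/scales_w) and their backward and
    // anti-aliased (`_aa`) counterparts, again with the same structure.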
    at::Tensor & upsample_bilinear2d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::upsample_bilinear2d_out::call(self_meta, output_size, align_corners, scales_h, scales_w, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::upsample_bilinear2d_out::call(self_, output_size, align_corners, scales_h, scales_w, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::upsample_bilinear2d::call(self_, output_size, align_corners, scales_h, scales_w);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & upsample_bilinear2d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::upsample_bilinear2d_backward_grad_input::call(grad_output_meta, output_size, input_size, align_corners, scales_h, scales_w, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::upsample_bilinear2d_backward_grad_input::call(grad_output_, output_size, input_size, align_corners, scales_h, scales_w, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::upsample_bilinear2d_backward::call(grad_output_, output_size, input_size, align_corners, scales_h, scales_w);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }

    at::Tensor & _upsample_bilinear2d_aa_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_upsample_bilinear2d_aa_out::call(self_meta, output_size, align_corners, scales_h, scales_w, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_upsample_bilinear2d_aa_out::call(self_, output_size, align_corners, scales_h, scales_w, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_upsample_bilinear2d_aa::call(self_, output_size, align_corners, scales_h, scales_w);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _upsample_bilinear2d_aa_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_upsample_bilinear2d_aa_backward_grad_input::call(grad_output_meta, output_size, input_size, align_corners, scales_h, scales_w, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_upsample_bilinear2d_aa_backward_grad_input::call(grad_output_, output_size, input_size, align_corners, scales_h, scales_w, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_upsample_bilinear2d_aa_backward::call(grad_output_, output_size, input_size, align_corners, scales_h, scales_w);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }

    at::Tensor & upsample_bicubic2d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::upsample_bicubic2d_out::call(self_meta, output_size, align_corners, scales_h, scales_w, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::upsample_bicubic2d_out::call(self_, output_size, align_corners, scales_h, scales_w, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::upsample_bicubic2d::call(self_, output_size, align_corners, scales_h, scales_w);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & upsample_bicubic2d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::upsample_bicubic2d_backward_grad_input::call(grad_output_meta, output_size, input_size, align_corners, scales_h, scales_w, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::upsample_bicubic2d_backward_grad_input::call(grad_output_, output_size, input_size, align_corners, scales_h, scales_w, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::upsample_bicubic2d_backward::call(grad_output_, output_size, input_size, align_corners, scales_h, scales_w);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }

    at::Tensor & _upsample_bicubic2d_aa_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_upsample_bicubic2d_aa_out::call(self_meta, output_size, align_corners, scales_h, scales_w, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_upsample_bicubic2d_aa_out::call(self_, output_size, align_corners, scales_h, scales_w, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_upsample_bicubic2d_aa::call(self_, output_size, align_corners, scales_h, scales_w);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

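    // [Editorial note, not part of the torchgen output] The branch structure shared by these
    // kernels: when the output argument is *not* a FunctionalTensorWrapper, the kernel either
    // rejects the call (case 1: some tensor input is functional, so there is no wrapper to commit
    // the mutation into) or redispatches to the real mutable op with functionalization skipped
    // (case 2: nothing is functional, e.g. when the Functionalize key was forced on via TLS but
    // the arguments were created outside of it). A small, hypothetical illustration of the
    // predicate those branches key off of:
    //
    //   at::Tensor plain   = at::empty({4});
    //   at::Tensor wrapped = at::functionalization::impl::to_functional_tensor(at::rand({4}));
    //   TORCH_INTERNAL_ASSERT(!at::functionalization::impl::isFunctionalTensor(plain));
    //   TORCH_INTERNAL_ASSERT(at::functionalization::impl::isFunctionalTensor(wrapped));
    //   // Passing `wrapped` as an input while `plain` is the out= argument corresponds to
    //   // case 1 and trips the internal assert below; all-plain arguments correspond to case 2.
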
    at::Tensor & _upsample_bicubic2d_aa_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_upsample_bicubic2d_aa_backward_grad_input::call(grad_output_meta, output_size, input_size, align_corners, scales_h, scales_w, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_upsample_bicubic2d_aa_backward_grad_input::call(grad_output_, output_size, input_size, align_corners, scales_h, scales_w, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_upsample_bicubic2d_aa_backward::call(grad_output_, output_size, input_size, align_corners, scales_h, scales_w);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }

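    // [Editorial note, not part of the torchgen output] The `if (false && !disable_meta_reference())`
    // prologue in these kernels is the meta-tensor reference check described by its own comment;
    // the leading `false` simply means the check is statically disabled for this operator. When
    // enabled, it replays the original mutable op on meta-device copies of the arguments so that
    // shape/stride errors surface exactly as the in-place op would report them, before the
    // functional rewrite runs. A minimal, hypothetical illustration of the idea (standalone, not
    // tied to the kernels in this file):
    //
    //   at::Tensor a = at::empty({2, 4}, at::kMeta);
    //   at::Tensor b = at::empty({2, 1, 4}, at::kMeta);
    //   // In-place add_ may not broadcast `a` up to the result shape {2, 2, 4}; the same shape
    //   // check runs for meta tensors, so the error surfaces without touching any real data.
    //   a.add_(b);
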
    at::Tensor & upsample_trilinear3d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::upsample_trilinear3d_out::call(self_meta, output_size, align_corners, scales_d, scales_h, scales_w, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::upsample_trilinear3d_out::call(self_, output_size, align_corners, scales_d, scales_h, scales_w, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::upsample_trilinear3d::call(self_, output_size, align_corners, scales_d, scales_h, scales_w);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & upsample_trilinear3d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::upsample_trilinear3d_backward_grad_input::call(grad_output_meta, output_size, input_size, align_corners, scales_d, scales_h, scales_w, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::upsample_trilinear3d_backward_grad_input::call(grad_output_, output_size, input_size, align_corners, scales_d, scales_h, scales_w, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::upsample_trilinear3d_backward::call(grad_output_, output_size, input_size, align_corners, scales_d, scales_h, scales_w);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }

    at::Tensor & upsample_nearest1d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::upsample_nearest1d_out::call(self_meta, output_size, scales, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::upsample_nearest1d_out::call(self_, output_size, scales, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::upsample_nearest1d::call(self_, output_size, scales);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _upsample_nearest_exact1d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_upsample_nearest_exact1d_out::call(self_meta, output_size, scales, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_upsample_nearest_exact1d_out::call(self_, output_size, scales, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_upsample_nearest_exact1d::call(self_, output_size, scales);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & upsample_nearest1d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::upsample_nearest1d_backward_grad_input::call(grad_output_meta, output_size, input_size, scales, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::upsample_nearest1d_backward_grad_input::call(grad_output_, output_size, input_size, scales, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::upsample_nearest1d_backward::call(grad_output_, output_size, input_size, scales);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }

    at::Tensor & _upsample_nearest_exact1d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_upsample_nearest_exact1d_backward_grad_input::call(grad_output_meta, output_size, input_size, scales, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_upsample_nearest_exact1d_backward_grad_input::call(grad_output_, output_size, input_size, scales, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_upsample_nearest_exact1d_backward::call(grad_output_, output_size, input_size, scales);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }

    at::Tensor & upsample_nearest2d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::upsample_nearest2d_out::call(self_meta, output_size, scales_h, scales_w, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::upsample_nearest2d_out::call(self_, output_size, scales_h, scales_w, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::upsample_nearest2d::call(self_, output_size, scales_h, scales_w);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _upsample_nearest_exact2d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_upsample_nearest_exact2d_out::call(self_meta, output_size, scales_h, scales_w, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_upsample_nearest_exact2d_out::call(self_, output_size, scales_h, scales_w, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_upsample_nearest_exact2d::call(self_, output_size, scales_h, scales_w);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & upsample_nearest2d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::upsample_nearest2d_backward_grad_input::call(grad_output_meta, output_size, input_size, scales_h, scales_w, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::upsample_nearest2d_backward_grad_input::call(grad_output_, output_size, input_size, scales_h, scales_w, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::upsample_nearest2d_backward::call(grad_output_, output_size, input_size, scales_h, scales_w);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }

    at::Tensor & _upsample_nearest_exact2d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_upsample_nearest_exact2d_backward_grad_input::call(grad_output_meta, output_size, input_size, scales_h, scales_w, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_upsample_nearest_exact2d_backward_grad_input::call(grad_output_, output_size, input_size, scales_h, scales_w, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_upsample_nearest_exact2d_backward::call(grad_output_, output_size, input_size, scales_h, scales_w);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }

    at::Tensor & upsample_nearest3d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::upsample_nearest3d_out::call(self_meta, output_size, scales_d, scales_h, scales_w, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::upsample_nearest3d_out::call(self_, output_size, scales_d, scales_h, scales_w, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::upsample_nearest3d::call(self_, output_size, scales_d, scales_h, scales_w);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _upsample_nearest_exact3d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_upsample_nearest_exact3d_out::call(self_meta, output_size, scales_d, scales_h, scales_w, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_upsample_nearest_exact3d_out::call(self_, output_size, scales_d, scales_h, scales_w, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_upsample_nearest_exact3d::call(self_, output_size, scales_d, scales_h, scales_w);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & upsample_nearest3d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::upsample_nearest3d_backward_grad_input::call(grad_output_meta, output_size, input_size, scales_d, scales_h, scales_w, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::upsample_nearest3d_backward_grad_input::call(grad_output_, output_size, input_size, scales_d, scales_h, scales_w, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::upsample_nearest3d_backward::call(grad_output_, output_size, input_size, scales_d, scales_h, scales_w);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }

    at::Tensor & _upsample_nearest_exact3d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_upsample_nearest_exact3d_backward_grad_input::call(grad_output_meta, output_size, input_size, scales_d, scales_h, scales_w, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_upsample_nearest_exact3d_backward_grad_input::call(grad_output_, output_size, input_size, scales_d, scales_h, scales_w, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_upsample_nearest_exact3d_backward::call(grad_output_, output_size, input_size, scales_d, scales_h, scales_w);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }

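    // [Editorial note, not part of the torchgen output] The epilogue shared by the functional
    // branch of the kernels above performs the actual commit: replace_() swaps the wrapper's
    // inner value for tmp_output, commit_update() records the mutation against the wrapper's
    // alias bookkeeping, and sync() brings the wrapper fully up to date. The inner tensor is read
    // both before and after the swap so propagate_xla_data_direct() can carry backend-specific
    // metadata (e.g. for XLA) from the old inner tensor to the new one. The buffer originally
    // wrapped by the caller is never written to; a hedged sketch of observing that from outside:
    //
    //   at::Tensor out   = at::zeros({4});
    //   at::Tensor f_out = at::functionalization::impl::to_functional_tensor(out);
    //   at::Tensor f_in  = at::functionalization::impl::to_functional_tensor(at::ones({4}));
    //   at::sigmoid_outf(f_in, f_out);  // handled by the functionalization kernel for sigmoid.out
    //   at::Tensor committed = at::functionalization::impl::from_functional_tensor(f_out);
    //   // `committed` holds sigmoid(1.0); `out` itself still holds zeros, because the mutation
    //   // was rerouted through the functional sigmoid and committed only into the wrapper.
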
    at::Tensor & sigmoid_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto output_meta = to_meta(output);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::sigmoid_backward_grad_input::call(grad_output_meta, output_meta, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor output_;
      if (at::functionalization::impl::isFunctionalTensor(output)) {
        at::functionalization::impl::sync(output);
        output_ = at::functionalization::impl::from_functional_tensor(output);
      } else {
        output_ = output;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || output.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(output))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::sigmoid_backward_grad_input::call(grad_output_, output_, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::sigmoid_backward::call(grad_output_, output_);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }

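    // [Editorial note, not part of the torchgen output] For the activation backward kernels here
    // (sigmoid_backward, logit_backward, tanh_backward) the safety check simply widens to every
    // tensor input: grad_input must be functional whenever any of grad_output/output/self is
    // functional (with a carve-out for XLA inputs, since cpu_tensor.copy_(xla_tensor) is legal),
    // and the functional fallback is the corresponding non-out backward op. A hedged sketch of
    // the mixed case that trips case 1:
    //
    //   at::Tensor grad_out = at::functionalization::impl::to_functional_tensor(at::ones({4}));
    //   at::Tensor output   = at::sigmoid(at::rand({4}));  // plain tensor
    //   at::Tensor grad_in  = at::empty({4});               // plain, non-functional out= argument
    //   // One input is functional but grad_in is not, so sigmoid_backward_out_grad_input above
    //   // hits case 1 and raises the "mutating a non-functional tensor ..." internal assert.
    //   at::sigmoid_backward_outf(grad_out, output, grad_in);
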
    at::Tensor & logit_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, ::std::optional<double> eps, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::logit_backward_grad_input::call(grad_output_meta, self_meta, eps, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::logit_backward_grad_input::call(grad_output_, self_, eps, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::logit_backward::call(grad_output_, self_, eps);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }

    at::Tensor & tanh_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, at::Tensor & grad_input) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto output_meta = to_meta(output);
        auto grad_input_meta = to_meta(grad_input);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::tanh_backward_grad_input::call(grad_output_meta, output_meta, grad_input_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor output_;
      if (at::functionalization::impl::isFunctionalTensor(output)) {
        at::functionalization::impl::sync(output);
        output_ = at::functionalization::impl::from_functional_tensor(output);
      } else {
        output_ = output;
      }
      
      at::Tensor grad_input_;
      if (at::functionalization::impl::isFunctionalTensor(grad_input)) {
        at::functionalization::impl::sync(grad_input);
        grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input);
      } else {
        grad_input_ = grad_input;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || output.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(output))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::tanh_backward_grad_input::call(grad_output_, output_, grad_input_);
         return grad_input;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::tanh_backward::call(grad_output_, output_);
        }
        auto grad_input_inner = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::replace_(grad_input, tmp_output);
        at::functionalization::impl::commit_update(grad_input);
        at::functionalization::impl::sync(grad_input);
        auto grad_input_inner_updated = at::functionalization::impl::from_functional_tensor(grad_input);
        at::functionalization::impl::propagate_xla_data_direct(grad_input_inner, grad_input_inner_updated);
        return grad_input;
      }
    }
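
    // A rough guide to the write-back sequence used at the end of each wrapper above:
    // replace_() swaps the value held inside the output's FunctionalTensorWrapper for the freshly
    // computed functional result, commit_update() records the mutation so it can be propagated to
    // aliases of the output, sync() applies any pending updates, and propagate_xla_data_direct()
    // carries backend-specific data (used by XLA) from the previous inner tensor over to the
    // updated one. See FunctionalTensorWrapper.h for the authoritative semantics.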

    at::Tensor & slow_conv_transpose2d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto weight_meta = to_meta(weight);
        auto bias_meta = to_meta(bias);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::slow_conv_transpose2d_out::call(self_meta, weight_meta, kernel_size, bias_meta, stride, padding, output_padding, dilation, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      ::std::optional<at::Tensor> bias_;
      if (at::functionalization::impl::isFunctionalTensor(bias)) {
        at::functionalization::impl::sync(bias);
        bias_ = at::functionalization::impl::from_functional_tensor(bias);
      } else {
        bias_ = bias;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::slow_conv_transpose2d_out::call(self_, weight_, kernel_size, bias_, stride, padding, output_padding, dilation, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::slow_conv_transpose2d::call(self_, weight_, kernel_size, bias_, stride, padding, output_padding, dilation);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & slow_conv_transpose3d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto weight_meta = to_meta(weight);
        auto bias_meta = to_meta(bias);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::slow_conv_transpose3d_out::call(self_meta, weight_meta, kernel_size, bias_meta, stride, padding, output_padding, dilation, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      ::std::optional<at::Tensor> bias_;
      if (at::functionalization::impl::isFunctionalTensor(bias)) {
        at::functionalization::impl::sync(bias);
        bias_ = at::functionalization::impl::from_functional_tensor(bias);
      } else {
        bias_ = bias;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::slow_conv_transpose3d_out::call(self_, weight_, kernel_size, bias_, stride, padding, output_padding, dilation, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::slow_conv_transpose3d::call(self_, weight_, kernel_size, bias_, stride, padding, output_padding, dilation);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & thnn_conv2d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto weight_meta = to_meta(weight);
        auto bias_meta = to_meta(bias);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::thnn_conv2d_out::call(self_meta, weight_meta, kernel_size, bias_meta, stride, padding, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      ::std::optional<at::Tensor> bias_;
      if (at::functionalization::impl::isFunctionalTensor(bias)) {
        at::functionalization::impl::sync(bias);
        bias_ = at::functionalization::impl::from_functional_tensor(bias);
      } else {
        bias_ = bias;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::thnn_conv2d_out::call(self_, weight_, kernel_size, bias_, stride, padding, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::thnn_conv2d::call(self_, weight_, kernel_size, bias_, stride, padding);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
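
    // In the convolution wrappers here, the optional `bias` argument is unwrapped with the same
    // sync()/from_functional_tensor() pattern as plain tensors, yielding a ::std::optional<at::Tensor>.
    // Note that `bias` participates in the "is any input functional?" check in case 1, while the
    // XLA device-type exemption above only inspects `self` and `weight`.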

    at::Tensor & _slow_conv2d_forward_out_output(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & output) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto weight_meta = to_meta(weight);
        auto bias_meta = to_meta(bias);
        auto output_meta = to_meta(output);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_slow_conv2d_forward_output::call(self_meta, weight_meta, kernel_size, bias_meta, stride, padding, output_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      ::std::optional<at::Tensor> bias_;
      if (at::functionalization::impl::isFunctionalTensor(bias)) {
        at::functionalization::impl::sync(bias);
        bias_ = at::functionalization::impl::from_functional_tensor(bias);
      } else {
        bias_ = bias;
      }
      
      at::Tensor output_;
      if (at::functionalization::impl::isFunctionalTensor(output)) {
        at::functionalization::impl::sync(output);
        output_ = at::functionalization::impl::from_functional_tensor(output);
      } else {
        output_ = output;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(output))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_slow_conv2d_forward_output::call(self_, weight_, kernel_size, bias_, stride, padding, output_);
         return output;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_slow_conv2d_forward::call(self_, weight_, kernel_size, bias_, stride, padding);
        }
        auto output_inner = at::functionalization::impl::from_functional_tensor(output);
        at::functionalization::impl::replace_(output, tmp_output);
        at::functionalization::impl::commit_update(output);
        at::functionalization::impl::sync(output);
        auto output_inner_updated = at::functionalization::impl::from_functional_tensor(output);
        at::functionalization::impl::propagate_xla_data_direct(output_inner, output_inner_updated);
        return output;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_out_output_mask_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_output_meta = to_meta(grad_output);
        auto self_meta = to_meta(self);
        auto weight_meta = to_meta(weight);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        auto out2_meta = to_meta(out2);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_slow_conv2d_backward_output_mask_out::call(grad_output_meta, self_meta, weight_meta, kernel_size, stride, padding, output_mask, out0_meta, out1_meta, out2_meta);
      }
      
      at::Tensor grad_output_;
      if (at::functionalization::impl::isFunctionalTensor(grad_output)) {
        at::functionalization::impl::sync(grad_output);
        grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output);
      } else {
        grad_output_ = grad_output;
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      
      at::Tensor out2_;
      if (at::functionalization::impl::isFunctionalTensor(out2)) {
        at::functionalization::impl::sync(out2);
        out2_ = at::functionalization::impl::from_functional_tensor(out2);
      } else {
        out2_ = out2;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad_output.device().type() == c10::DeviceType::XLA || self.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::_slow_conv2d_backward_output_mask_out::call(grad_output_, self_, weight_, kernel_size, stride, padding, output_mask, out0_, out1_, out2_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_slow_conv2d_backward_output_mask::call(grad_output_, self_, weight_, kernel_size, stride, padding, output_mask);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        auto out2_inner = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::replace_(out2, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(out2);
        at::functionalization::impl::sync(out2);
        auto out2_inner_updated = at::functionalization::impl::from_functional_tensor(out2);
        at::functionalization::impl::propagate_xla_data_direct(out2_inner, out2_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
      }
    }
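
    // Multi-output variants such as _slow_conv2d_backward_out_output_mask_out apply the same
    // pattern per output: every `out` tensor must be functional to take the functionalized path,
    // and each element of the functional result tuple is written back into its corresponding
    // output via std::get<i>(tmp_output) followed by the usual replace_/commit_update/sync steps.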

    at::Tensor & _conv_depthwise2d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto weight_meta = to_meta(weight);
        auto bias_meta = to_meta(bias);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_conv_depthwise2d_out::call(self_meta, weight_meta, kernel_size, bias_meta, stride, padding, dilation, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      ::std::optional<at::Tensor> bias_;
      if (at::functionalization::impl::isFunctionalTensor(bias)) {
        at::functionalization::impl::sync(bias);
        bias_ = at::functionalization::impl::from_functional_tensor(bias);
      } else {
        bias_ = bias;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_conv_depthwise2d_out::call(self_, weight_, kernel_size, bias_, stride, padding, dilation, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_conv_depthwise2d::call(self_, weight_, kernel_size, bias_, stride, padding, dilation);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & conv_depthwise3d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto weight_meta = to_meta(weight);
        auto bias_meta = to_meta(bias);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::conv_depthwise3d_out::call(self_meta, weight_meta, kernel_size, bias_meta, stride, padding, dilation, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      ::std::optional<at::Tensor> bias_;
      if (at::functionalization::impl::isFunctionalTensor(bias)) {
        at::functionalization::impl::sync(bias);
        bias_ = at::functionalization::impl::from_functional_tensor(bias);
      } else {
        bias_ = bias;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::conv_depthwise3d_out::call(self_, weight_, kernel_size, bias_, stride, padding, dilation, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::conv_depthwise3d::call(self_, weight_, kernel_size, bias_, stride, padding, dilation);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & slow_conv3d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto weight_meta = to_meta(weight);
        auto bias_meta = to_meta(bias);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::slow_conv3d_out::call(self_meta, weight_meta, kernel_size, bias_meta, stride, padding, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      ::std::optional<at::Tensor> bias_;
      if (at::functionalization::impl::isFunctionalTensor(bias)) {
        at::functionalization::impl::sync(bias);
        bias_ = at::functionalization::impl::from_functional_tensor(bias);
      } else {
        bias_ = bias;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::slow_conv3d_out::call(self_, weight_, kernel_size, bias_, stride, padding, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::slow_conv3d::call(self_, weight_, kernel_size, bias_, stride, padding);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & slow_conv3d_forward_out_output(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & output) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto weight_meta = to_meta(weight);
        auto bias_meta = to_meta(bias);
        auto output_meta = to_meta(output);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::slow_conv3d_forward_output::call(self_meta, weight_meta, kernel_size, bias_meta, stride, padding, output_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      ::std::optional<at::Tensor> bias_;
      if (at::functionalization::impl::isFunctionalTensor(bias)) {
        at::functionalization::impl::sync(bias);
        bias_ = at::functionalization::impl::from_functional_tensor(bias);
      } else {
        bias_ = bias;
      }
      
      at::Tensor output_;
      if (at::functionalization::impl::isFunctionalTensor(output)) {
        at::functionalization::impl::sync(output);
        output_ = at::functionalization::impl::from_functional_tensor(output);
      } else {
        output_ = output;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(output))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::slow_conv3d_forward_output::call(self_, weight_, kernel_size, bias_, stride, padding, output_);
         return output;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::slow_conv3d_forward::call(self_, weight_, kernel_size, bias_, stride, padding);
        }
        auto output_inner = at::functionalization::impl::from_functional_tensor(output);
        at::functionalization::impl::replace_(output, tmp_output);
        at::functionalization::impl::commit_update(output);
        at::functionalization::impl::sync(output);
        auto output_inner_updated = at::functionalization::impl::from_functional_tensor(output);
        at::functionalization::impl::propagate_xla_data_direct(output_inner, output_inner_updated);
        return output;
      }
    }

    at::Tensor & slow_conv_dilated2d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto weight_meta = to_meta(weight);
        auto bias_meta = to_meta(bias);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::slow_conv_dilated2d_out::call(self_meta, weight_meta, kernel_size, bias_meta, stride, padding, dilation, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      ::std::optional<at::Tensor> bias_;
      if (at::functionalization::impl::isFunctionalTensor(bias)) {
        at::functionalization::impl::sync(bias);
        bias_ = at::functionalization::impl::from_functional_tensor(bias);
      } else {
        bias_ = bias;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::slow_conv_dilated2d_out::call(self_, weight_, kernel_size, bias_, stride, padding, dilation, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::slow_conv_dilated2d::call(self_, weight_, kernel_size, bias_, stride, padding, dilation);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & slow_conv_dilated3d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto weight_meta = to_meta(weight);
        auto bias_meta = to_meta(bias);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::slow_conv_dilated3d_out::call(self_meta, weight_meta, kernel_size, bias_meta, stride, padding, dilation, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor weight_;
      if (at::functionalization::impl::isFunctionalTensor(weight)) {
        at::functionalization::impl::sync(weight);
        weight_ = at::functionalization::impl::from_functional_tensor(weight);
      } else {
        weight_ = weight;
      }
      
      ::std::optional<at::Tensor> bias_;
      if (at::functionalization::impl::isFunctionalTensor(bias)) {
        at::functionalization::impl::sync(bias);
        bias_ = at::functionalization::impl::from_functional_tensor(bias);
      } else {
        bias_ = bias;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || weight.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::slow_conv_dilated3d_out::call(self_, weight_, kernel_size, bias_, stride, padding, dilation, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::slow_conv_dilated3d::call(self_, weight_, kernel_size, bias_, stride, padding, dilation);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & col2im_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::col2im_out::call(self_meta, output_size, kernel_size, dilation, padding, stride, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::col2im_out::call(self_, output_size, kernel_size, dilation, padding, stride, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::col2im::call(self_, output_size, kernel_size, dilation, padding, stride);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & column_stack_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto tensors_meta = to_meta(tensors);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::column_stack_out::call(tensors_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> tensors_;
      if (at::functionalization::impl::isFunctionalTensor(tensors)) {
        at::functionalization::impl::sync(tensors);
        tensors_ = at::functionalization::impl::from_functional_tensor(tensors);
      } else {
        tensors_ = tensors.vec();
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(tensors))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::column_stack_out::call(tensors_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::column_stack::call(tensors_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
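
    // Every out= kernel in this file follows the same functionalization shape:
    // sync and unwrap each functional tensor argument (element-wise for
    // TensorList arguments such as `tensors` above), call the purely functional
    // variant of the op with at::AutoDispatchSkipFunctionalize in scope, and
    // then fold the result back into the wrapper passed in as `out` via
    // replace_() / commit_update() / sync(). Only the op being called changes
    // from kernel to kernel; the surrounding plumbing is identical.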

    at::Tensor & im2col_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::im2col_out::call(self_meta, kernel_size, dilation, padding, stride, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::im2col_out::call(self_, kernel_size, dilation, padding, stride, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::im2col::call(self_, kernel_size, dilation, padding, stride);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
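
    // A note on the `if (false && !disable_meta_reference())` prologue used by
    // these kernels: the literal `false` is baked in by the code generator, so
    // the meta pre-check is compiled out for this op. Where the generator emits
    // `true` instead (presumably for ops whose meta reference is usable), the
    // block replays the original mutable op on meta tensors first, purely to
    // surface shape errors that the functional variant would not report.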

    at::Tensor & isinf_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::isinf_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::isinf_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::isinf::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
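
    // When `out` is not a functional tensor, the kernels above split into two
    // cases: mutating a plain tensor with functional inputs is rejected
    // (case 1), while an all-plain call just redispatches to the regular out=
    // kernel with functionalization skipped (case 2). Illustrative only, kept
    // as a comment and assuming the usual ATen surface API rather than anything
    // generated here:
    //
    //   at::Tensor x = at::randn({4});
    //   at::Tensor y = at::empty({4}, x.options().dtype(at::kBool));
    //   // If this kernel is reached with plain (unwrapped) tensors, neither
    //   // branch errors: case 2 simply redispatches to the normal out= kernel.
    //   at::isinf_out(y, x);  // out comes first in the at::*_out convention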

    at::Tensor & isposinf_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::isposinf_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::isposinf_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::isposinf::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & isneginf_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::isneginf_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::isneginf_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::isneginf::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
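
    // The `out_inner` / `out_inner_updated` pair taken around the write-back
    // snapshots the wrapped value before and after replace_() / commit_update(),
    // so that propagate_xla_data_direct() can carry any XLA-side data attached
    // to the previous inner tensor over to the updated one.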

    at::Tensor & special_entr_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_entr_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_entr_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_entr::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_ndtri_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_ndtri_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_ndtri_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_ndtri::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_log_ndtr_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_log_ndtr_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_log_ndtr_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_log_ndtr::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_expm1_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_expm1_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_expm1_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_expm1::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_exp2_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_exp2_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_exp2_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_exp2::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_psi_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_psi_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_psi_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_psi::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_digamma_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_digamma_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_digamma_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_digamma::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_gammaln_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_gammaln_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_gammaln_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_gammaln::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_erf_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_erf_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_erf_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_erf::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_erfc_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_erfc_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_erfc_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_erfc::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_erfcx_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_erfcx_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_erfcx_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_erfcx::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_erfinv_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_erfinv_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_erfinv_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_erfinv::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_ndtr_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_ndtr_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_ndtr_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_ndtr::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_xlog1py_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_xlog1py_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_xlog1py_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_xlog1py::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
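
    // In the binary variant above, both the XLA escape hatch and the
    // functional-tensor check enumerate every tensor argument (`self` and
    // `other`), not just `self`: the case-1 error only fires when some input is
    // functional while `out` is not, and it is skipped entirely whenever any
    // argument lives on an XLA device, so patterns like
    // cpu_tensor.copy_(xla_tensor) keep working.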

    at::Tensor & special_xlog1py_out_self_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_xlog1py_self_scalar_out::call(self, other_meta, out_meta);
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_xlog1py_self_scalar_out::call(self, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_xlog1py_self_scalar::call(self, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
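
    // Scalar arguments (`self` in the kernel above) take no part in
    // functionalization: they are forwarded untouched, and only the Tensor
    // arguments are synced, unwrapped, and checked before dispatch.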

    at::Tensor & special_xlog1py_out_other_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_xlog1py_other_scalar_out::call(self_meta, other, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_xlog1py_other_scalar_out::call(self_, other, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_xlog1py_other_scalar::call(self_, other);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_xlogy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_xlogy_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_xlogy_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_xlogy::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
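
    // Editorial note: the literal boolean constants in these wrappers
    // (`if (false && !disable_meta_reference())`, `!(true && ...)`, `(false || ...)`)
    // are emitted by torchgen as per-op configuration and are folded away by the
    // compiler. For example, the `false &&` above means the meta-reference pass is
    // compiled out for this overload, and the `false ||` chains simply enumerate the
    // tensor arguments that participate in the XLA / functional-tensor checks.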

    at::Tensor & special_xlogy_out_self_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_xlogy_self_scalar_out::call(self, other_meta, out_meta);
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_xlogy_self_scalar_out::call(self, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_xlogy_self_scalar::call(self, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_xlogy_out_other_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_xlogy_other_scalar_out::call(self_meta, other, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_xlogy_other_scalar_out::call(self_, other, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_xlogy_other_scalar::call(self_, other);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_zeta_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_zeta_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_zeta_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_zeta::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
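
    // Editorial note: `out_inner` is captured before replace_()/commit_update() and
    // compared against the refreshed inner tensor afterwards so that
    // propagate_xla_data_direct() can carry XLA-specific metadata (e.g. sharding)
    // from the old inner tensor to the new one; on non-XLA backends this is expected
    // to be a no-op.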

    at::Tensor & special_zeta_out_self_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_zeta_self_scalar_out::call(self, other_meta, out_meta);
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_zeta_self_scalar_out::call(self, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_zeta_self_scalar::call(self, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_zeta_out_other_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_zeta_other_scalar_out::call(self_meta, other, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_zeta_other_scalar_out::call(self_, other, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_zeta_other_scalar::call(self_, other);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_i0_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_i0_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_i0_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_i0::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_i0e_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_i0e_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_i0e_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_i0e::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_i1_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_i1_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_i1_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_i1::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_i1e_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_i1e_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_i1e_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_i1e::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
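
    // Editorial sketch (illustrative only; tensor names are hypothetical, not part of
    // this file): how a call can reach one of these wrappers. It assumes the
    // Functionalize dispatch key is active, e.g. via an IncludeDispatchKeyGuard:
    //
    //   c10::impl::IncludeDispatchKeyGuard functionalize_guard(c10::DispatchKey::Functionalize);
    //   at::Tensor self = at::functionalization::impl::to_functional_tensor(at::randn({8}));
    //   at::Tensor out  = at::functionalization::impl::to_functional_tensor(at::empty({8}));
    //   at::special_i1e_out(out, self);                 // dispatches to special_i1e_out_out above
    //   at::functionalization::impl::sync(out);         // materialize the pending update
    //   at::Tensor result = at::functionalization::impl::from_functional_tensor(out);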

    at::Tensor & special_logit_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<double> eps, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_logit_out::call(self_meta, eps, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_logit_out::call(self_, eps, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_logit::call(self_, eps);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_polygamma_out_out(c10::DispatchKeySet dispatchKeySet, int64_t n, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_polygamma_out::call(n, self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_polygamma_out::call(n, self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_polygamma::call(n, self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_logsumexp_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_logsumexp_out::call(self_meta, dim, keepdim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_logsumexp_out::call(self_, dim, keepdim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_logsumexp::call(self_, dim, keepdim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_expit_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_expit_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_expit_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_expit::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_sinc_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_sinc_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_sinc_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_sinc::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_round_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t decimals, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_round_out::call(self_meta, decimals, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_round_out::call(self_, decimals, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_round::call(self_, decimals);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_log1p_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_log1p_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_log1p_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_log1p::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_gammainc_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_gammainc_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_gammainc_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_gammainc::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_gammaincc_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_gammaincc_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_gammaincc_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_gammaincc::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
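    // A rough, illustrative sketch of how a kernel such as special_gammaincc_out_out above is
    // typically reached under functionalization: wrap the tensors as functional tensors, enable
    // the Functionalize dispatch key, and call the out= op as usual. The ATen calls named below
    // exist, but the snippet is a sketch under those assumptions rather than a complete test.
    //
    //   at::Tensor self  = at::rand({4});
    //   at::Tensor other = at::rand({4});
    //   at::Tensor out   = at::empty({4});
    //   at::Tensor f_self  = at::functionalization::impl::to_functional_tensor(self);
    //   at::Tensor f_other = at::functionalization::impl::to_functional_tensor(other);
    //   at::Tensor f_out   = at::functionalization::impl::to_functional_tensor(out);
    //   {
    //     c10::impl::IncludeDispatchKeyGuard guard(c10::DispatchKey::Functionalize);
    //     at::special_gammaincc_outf(f_self, f_other, f_out);  // dispatches to the kernel above
    //   }
    //   at::functionalization::impl::sync(f_out);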

    at::Tensor & special_multigammaln_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t p, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_multigammaln_out::call(self_meta, p, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_multigammaln_out::call(self_, p, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_multigammaln::call(self_, p);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & fft_fft_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::fft_fft_out::call(self_meta, n, dim, norm, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::fft_fft_out::call(self_, n, dim, norm, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::fft_fft::call(self_, n, dim, norm);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & fft_ifft_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::fft_ifft_out::call(self_meta, n, dim, norm, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::fft_ifft_out::call(self_, n, dim, norm, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::fft_ifft::call(self_, n, dim, norm);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & fft_rfft_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::fft_rfft_out::call(self_meta, n, dim, norm, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::fft_rfft_out::call(self_, n, dim, norm, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::fft_rfft::call(self_, n, dim, norm);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & fft_irfft_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::fft_irfft_out::call(self_meta, n, dim, norm, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::fft_irfft_out::call(self_, n, dim, norm, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::fft_irfft::call(self_, n, dim, norm);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & fft_hfft_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::fft_hfft_out::call(self_meta, n, dim, norm, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::fft_hfft_out::call(self_, n, dim, norm, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::fft_hfft::call(self_, n, dim, norm);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & fft_ihfft_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::fft_ihfft_out::call(self_meta, n, dim, norm, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::fft_ihfft_out::call(self_, n, dim, norm, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::fft_ihfft::call(self_, n, dim, norm);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & fft_fft2_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::fft_fft2_out::call(self_meta, s, dim, norm, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::fft_fft2_out::call(self_, s, dim, norm, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::fft_fft2::call(self_, s, dim, norm);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & fft_ifft2_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::fft_ifft2_out::call(self_meta, s, dim, norm, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::fft_ifft2_out::call(self_, s, dim, norm, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::fft_ifft2::call(self_, s, dim, norm);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & fft_rfft2_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::fft_rfft2_out::call(self_meta, s, dim, norm, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::fft_rfft2_out::call(self_, s, dim, norm, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::fft_rfft2::call(self_, s, dim, norm);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & fft_irfft2_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::fft_irfft2_out::call(self_meta, s, dim, norm, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::fft_irfft2_out::call(self_, s, dim, norm, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::fft_irfft2::call(self_, s, dim, norm);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & fft_hfft2_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::fft_hfft2_out::call(self_meta, s, dim, norm, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::fft_hfft2_out::call(self_, s, dim, norm, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::fft_hfft2::call(self_, s, dim, norm);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & fft_ihfft2_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::fft_ihfft2_out::call(self_meta, s, dim, norm, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::fft_ihfft2_out::call(self_, s, dim, norm, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::fft_ihfft2::call(self_, s, dim, norm);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & fft_fftn_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::fft_fftn_out::call(self_meta, s, dim, norm, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::fft_fftn_out::call(self_, s, dim, norm, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::fft_fftn::call(self_, s, dim, norm);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & fft_ifftn_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::fft_ifftn_out::call(self_meta, s, dim, norm, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::fft_ifftn_out::call(self_, s, dim, norm, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::fft_ifftn::call(self_, s, dim, norm);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & fft_rfftn_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::fft_rfftn_out::call(self_meta, s, dim, norm, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::fft_rfftn_out::call(self_, s, dim, norm, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::fft_rfftn::call(self_, s, dim, norm);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & fft_irfftn_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::fft_irfftn_out::call(self_meta, s, dim, norm, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::fft_irfftn_out::call(self_, s, dim, norm, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::fft_irfftn::call(self_, s, dim, norm);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & fft_hfftn_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::fft_hfftn_out::call(self_meta, s, dim, norm, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::fft_hfftn_out::call(self_, s, dim, norm, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::fft_hfftn::call(self_, s, dim, norm);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & fft_ihfftn_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::fft_ihfftn_out::call(self_meta, s, dim, norm, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::fft_ihfftn_out::call(self_, s, dim, norm, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::fft_ihfftn::call(self_, s, dim, norm);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & fft_fftfreq_out_out(c10::DispatchKeySet dispatchKeySet, int64_t n, double d, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::fft_fftfreq_out::call(n, d, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::fft_fftfreq_out::call(n, d, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::fft_fftfreq::call(n, d, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & fft_rfftfreq_out_out(c10::DispatchKeySet dispatchKeySet, int64_t n, double d, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::fft_rfftfreq_out::call(n, d, out_meta);
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false)) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::fft_rfftfreq_out::call(n, d, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::fft_rfftfreq::call(n, d, out_.scalar_type(), out_.layout(), out_.device(), ::std::nullopt);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
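
    // Note: fft_fftfreq_out_out and fft_rfftfreq_out_out above take no tensor
    // inputs, so the "mutating a non-functional tensor" error can never fire
    // (its condition reduces to `!(false) && (false)`), and the functional
    // variants are reconstructed by reading scalar_type/layout/device off the
    // unwrapped out_ tensor, leaving pin_memory as nullopt.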

    ::std::tuple<at::Tensor &,at::Tensor &> linalg_cholesky_ex_out_L(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper, bool check_errors, at::Tensor & L, at::Tensor & info) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto L_meta = to_meta(L);
        auto info_meta = to_meta(info);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_cholesky_ex_L::call(self_meta, upper, check_errors, L_meta, info_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor L_;
      if (at::functionalization::impl::isFunctionalTensor(L)) {
        at::functionalization::impl::sync(L);
        L_ = at::functionalization::impl::from_functional_tensor(L);
      } else {
        L_ = L;
      }
      
      at::Tensor info_;
      if (at::functionalization::impl::isFunctionalTensor(info)) {
        at::functionalization::impl::sync(info);
        info_ = at::functionalization::impl::from_functional_tensor(info);
      } else {
        info_ = info;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(L) && at::functionalization::impl::isFunctionalTensor(info))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::linalg_cholesky_ex_L::call(self_, upper, check_errors, L_, info_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(L, info);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_cholesky_ex::call(self_, upper, check_errors);
        }
        auto L_inner = at::functionalization::impl::from_functional_tensor(L);
        at::functionalization::impl::replace_(L, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(L);
        at::functionalization::impl::sync(L);
        auto L_inner_updated = at::functionalization::impl::from_functional_tensor(L);
        at::functionalization::impl::propagate_xla_data_direct(L_inner, L_inner_updated);
        auto info_inner = at::functionalization::impl::from_functional_tensor(info);
        at::functionalization::impl::replace_(info, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(info);
        at::functionalization::impl::sync(info);
        auto info_inner_updated = at::functionalization::impl::from_functional_tensor(info);
        at::functionalization::impl::propagate_xla_data_direct(info_inner, info_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(L, info);
      }
    }
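
    // Note: for ops with several out arguments, such as linalg_cholesky_ex above
    // and the other tuple-returning kernels below, the functional variant returns
    // a tuple, and each out argument gets its own replace_/commit_update/sync/
    // propagate_xla_data_direct sequence, indexed with std::get<i> in declaration
    // order, before the references are returned as a tuple.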

    at::Tensor & linalg_cholesky_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_cholesky_out::call(self_meta, upper, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::linalg_cholesky_out::call(self_, upper, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_cholesky::call(self_, upper);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & linalg_cross_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, int64_t dim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_cross_out::call(self_meta, other_meta, dim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::linalg_cross_out::call(self_, other_, dim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_cross::call(self_, other_, dim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
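
    // Note: when an op has more than one read-only tensor input, the guard that
    // forbids mutating a non-functional out with functional inputs ORs the
    // device-is-XLA and isFunctionalTensor tests over all of those inputs (see
    // the condition in linalg_cross_out_out above); any XLA input suppresses the
    // error because cpu_tensor.copy_(xla_tensor) is valid code.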

    ::std::tuple<at::Tensor &,at::Tensor &> linalg_lu_factor_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot, at::Tensor & LU, at::Tensor & pivots) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto A_meta = to_meta(A);
        auto LU_meta = to_meta(LU);
        auto pivots_meta = to_meta(pivots);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_lu_factor_out::call(A_meta, pivot, LU_meta, pivots_meta);
      }
      
      at::Tensor A_;
      if (at::functionalization::impl::isFunctionalTensor(A)) {
        at::functionalization::impl::sync(A);
        A_ = at::functionalization::impl::from_functional_tensor(A);
      } else {
        A_ = A;
      }
      
      at::Tensor LU_;
      if (at::functionalization::impl::isFunctionalTensor(LU)) {
        at::functionalization::impl::sync(LU);
        LU_ = at::functionalization::impl::from_functional_tensor(LU);
      } else {
        LU_ = LU;
      }
      
      at::Tensor pivots_;
      if (at::functionalization::impl::isFunctionalTensor(pivots)) {
        at::functionalization::impl::sync(pivots);
        pivots_ = at::functionalization::impl::from_functional_tensor(pivots);
      } else {
        pivots_ = pivots;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(LU) && at::functionalization::impl::isFunctionalTensor(pivots))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || A.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(A))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::linalg_lu_factor_out::call(A_, pivot, LU_, pivots_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(LU, pivots);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_lu_factor::call(A_, pivot);
        }
        auto LU_inner = at::functionalization::impl::from_functional_tensor(LU);
        at::functionalization::impl::replace_(LU, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(LU);
        at::functionalization::impl::sync(LU);
        auto LU_inner_updated = at::functionalization::impl::from_functional_tensor(LU);
        at::functionalization::impl::propagate_xla_data_direct(LU_inner, LU_inner_updated);
        auto pivots_inner = at::functionalization::impl::from_functional_tensor(pivots);
        at::functionalization::impl::replace_(pivots, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(pivots);
        at::functionalization::impl::sync(pivots);
        auto pivots_inner_updated = at::functionalization::impl::from_functional_tensor(pivots);
        at::functionalization::impl::propagate_xla_data_direct(pivots_inner, pivots_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(LU, pivots);
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_factor_ex_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot, bool check_errors, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto A_meta = to_meta(A);
        auto LU_meta = to_meta(LU);
        auto pivots_meta = to_meta(pivots);
        auto info_meta = to_meta(info);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_lu_factor_ex_out::call(A_meta, pivot, check_errors, LU_meta, pivots_meta, info_meta);
      }
      
      at::Tensor A_;
      if (at::functionalization::impl::isFunctionalTensor(A)) {
        at::functionalization::impl::sync(A);
        A_ = at::functionalization::impl::from_functional_tensor(A);
      } else {
        A_ = A;
      }
      
      at::Tensor LU_;
      if (at::functionalization::impl::isFunctionalTensor(LU)) {
        at::functionalization::impl::sync(LU);
        LU_ = at::functionalization::impl::from_functional_tensor(LU);
      } else {
        LU_ = LU;
      }
      
      at::Tensor pivots_;
      if (at::functionalization::impl::isFunctionalTensor(pivots)) {
        at::functionalization::impl::sync(pivots);
        pivots_ = at::functionalization::impl::from_functional_tensor(pivots);
      } else {
        pivots_ = pivots;
      }
      
      at::Tensor info_;
      if (at::functionalization::impl::isFunctionalTensor(info)) {
        at::functionalization::impl::sync(info);
        info_ = at::functionalization::impl::from_functional_tensor(info);
      } else {
        info_ = info;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(LU) && at::functionalization::impl::isFunctionalTensor(pivots) && at::functionalization::impl::isFunctionalTensor(info))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || A.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(A))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::linalg_lu_factor_ex_out::call(A_, pivot, check_errors, LU_, pivots_, info_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(LU, pivots, info);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_lu_factor_ex::call(A_, pivot, check_errors);
        }
        auto LU_inner = at::functionalization::impl::from_functional_tensor(LU);
        at::functionalization::impl::replace_(LU, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(LU);
        at::functionalization::impl::sync(LU);
        auto LU_inner_updated = at::functionalization::impl::from_functional_tensor(LU);
        at::functionalization::impl::propagate_xla_data_direct(LU_inner, LU_inner_updated);
        auto pivots_inner = at::functionalization::impl::from_functional_tensor(pivots);
        at::functionalization::impl::replace_(pivots, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(pivots);
        at::functionalization::impl::sync(pivots);
        auto pivots_inner_updated = at::functionalization::impl::from_functional_tensor(pivots);
        at::functionalization::impl::propagate_xla_data_direct(pivots_inner, pivots_inner_updated);
        auto info_inner = at::functionalization::impl::from_functional_tensor(info);
        at::functionalization::impl::replace_(info, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(info);
        at::functionalization::impl::sync(info);
        auto info_inner_updated = at::functionalization::impl::from_functional_tensor(info);
        at::functionalization::impl::propagate_xla_data_direct(info_inner, info_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(LU, pivots, info);
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot, at::Tensor & P, at::Tensor & L, at::Tensor & U) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto A_meta = to_meta(A);
        auto P_meta = to_meta(P);
        auto L_meta = to_meta(L);
        auto U_meta = to_meta(U);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_lu_out::call(A_meta, pivot, P_meta, L_meta, U_meta);
      }
      
      at::Tensor A_;
      if (at::functionalization::impl::isFunctionalTensor(A)) {
        at::functionalization::impl::sync(A);
        A_ = at::functionalization::impl::from_functional_tensor(A);
      } else {
        A_ = A;
      }
      
      at::Tensor P_;
      if (at::functionalization::impl::isFunctionalTensor(P)) {
        at::functionalization::impl::sync(P);
        P_ = at::functionalization::impl::from_functional_tensor(P);
      } else {
        P_ = P;
      }
      
      at::Tensor L_;
      if (at::functionalization::impl::isFunctionalTensor(L)) {
        at::functionalization::impl::sync(L);
        L_ = at::functionalization::impl::from_functional_tensor(L);
      } else {
        L_ = L;
      }
      
      at::Tensor U_;
      if (at::functionalization::impl::isFunctionalTensor(U)) {
        at::functionalization::impl::sync(U);
        U_ = at::functionalization::impl::from_functional_tensor(U);
      } else {
        U_ = U;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(P) && at::functionalization::impl::isFunctionalTensor(L) && at::functionalization::impl::isFunctionalTensor(U))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || A.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(A))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::linalg_lu_out::call(A_, pivot, P_, L_, U_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(P, L, U);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_lu::call(A_, pivot);
        }
        auto P_inner = at::functionalization::impl::from_functional_tensor(P);
        at::functionalization::impl::replace_(P, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(P);
        at::functionalization::impl::sync(P);
        auto P_inner_updated = at::functionalization::impl::from_functional_tensor(P);
        at::functionalization::impl::propagate_xla_data_direct(P_inner, P_inner_updated);
        auto L_inner = at::functionalization::impl::from_functional_tensor(L);
        at::functionalization::impl::replace_(L, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(L);
        at::functionalization::impl::sync(L);
        auto L_inner_updated = at::functionalization::impl::from_functional_tensor(L);
        at::functionalization::impl::propagate_xla_data_direct(L_inner, L_inner_updated);
        auto U_inner = at::functionalization::impl::from_functional_tensor(U);
        at::functionalization::impl::replace_(U, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(U);
        at::functionalization::impl::sync(U);
        auto U_inner_updated = at::functionalization::impl::from_functional_tensor(U);
        at::functionalization::impl::propagate_xla_data_direct(U_inner, U_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(P, L, U);
      }
    }

    at::Tensor & linalg_lu_solve_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto LU_meta = to_meta(LU);
        auto pivots_meta = to_meta(pivots);
        auto B_meta = to_meta(B);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_lu_solve_out::call(LU_meta, pivots_meta, B_meta, left, adjoint, out_meta);
      }
      
      at::Tensor LU_;
      if (at::functionalization::impl::isFunctionalTensor(LU)) {
        at::functionalization::impl::sync(LU);
        LU_ = at::functionalization::impl::from_functional_tensor(LU);
      } else {
        LU_ = LU;
      }
      
      at::Tensor pivots_;
      if (at::functionalization::impl::isFunctionalTensor(pivots)) {
        at::functionalization::impl::sync(pivots);
        pivots_ = at::functionalization::impl::from_functional_tensor(pivots);
      } else {
        pivots_ = pivots;
      }
      
      at::Tensor B_;
      if (at::functionalization::impl::isFunctionalTensor(B)) {
        at::functionalization::impl::sync(B);
        B_ = at::functionalization::impl::from_functional_tensor(B);
      } else {
        B_ = B;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || LU.device().type() == c10::DeviceType::XLA || pivots.device().type() == c10::DeviceType::XLA || B.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(LU) || at::functionalization::impl::isFunctionalTensor(pivots) || at::functionalization::impl::isFunctionalTensor(B))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::linalg_lu_solve_out::call(LU_, pivots_, B_, left, adjoint, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_lu_solve::call(LU_, pivots_, B_, left, adjoint);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _linalg_det_out_result(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, at::Tensor & result, at::Tensor & LU, at::Tensor & pivots) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto A_meta = to_meta(A);
        auto result_meta = to_meta(result);
        auto LU_meta = to_meta(LU);
        auto pivots_meta = to_meta(pivots);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_linalg_det_result::call(A_meta, result_meta, LU_meta, pivots_meta);
      }
      
      at::Tensor A_;
      if (at::functionalization::impl::isFunctionalTensor(A)) {
        at::functionalization::impl::sync(A);
        A_ = at::functionalization::impl::from_functional_tensor(A);
      } else {
        A_ = A;
      }
      
      at::Tensor result_;
      if (at::functionalization::impl::isFunctionalTensor(result)) {
        at::functionalization::impl::sync(result);
        result_ = at::functionalization::impl::from_functional_tensor(result);
      } else {
        result_ = result;
      }
      
      at::Tensor LU_;
      if (at::functionalization::impl::isFunctionalTensor(LU)) {
        at::functionalization::impl::sync(LU);
        LU_ = at::functionalization::impl::from_functional_tensor(LU);
      } else {
        LU_ = LU;
      }
      
      at::Tensor pivots_;
      if (at::functionalization::impl::isFunctionalTensor(pivots)) {
        at::functionalization::impl::sync(pivots);
        pivots_ = at::functionalization::impl::from_functional_tensor(pivots);
      } else {
        pivots_ = pivots;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(result) && at::functionalization::impl::isFunctionalTensor(LU) && at::functionalization::impl::isFunctionalTensor(pivots))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || A.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(A))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::_linalg_det_result::call(A_, result_, LU_, pivots_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(result, LU, pivots);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_linalg_det::call(A_);
        }
        auto result_inner = at::functionalization::impl::from_functional_tensor(result);
        at::functionalization::impl::replace_(result, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(result);
        at::functionalization::impl::sync(result);
        auto result_inner_updated = at::functionalization::impl::from_functional_tensor(result);
        at::functionalization::impl::propagate_xla_data_direct(result_inner, result_inner_updated);
        auto LU_inner = at::functionalization::impl::from_functional_tensor(LU);
        at::functionalization::impl::replace_(LU, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(LU);
        at::functionalization::impl::sync(LU);
        auto LU_inner_updated = at::functionalization::impl::from_functional_tensor(LU);
        at::functionalization::impl::propagate_xla_data_direct(LU_inner, LU_inner_updated);
        auto pivots_inner = at::functionalization::impl::from_functional_tensor(pivots);
        at::functionalization::impl::replace_(pivots, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(pivots);
        at::functionalization::impl::sync(pivots);
        auto pivots_inner_updated = at::functionalization::impl::from_functional_tensor(pivots);
        at::functionalization::impl::propagate_xla_data_direct(pivots_inner, pivots_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(result, LU, pivots);
      }
    }

    at::Tensor & linalg_det_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto A_meta = to_meta(A);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_det_out::call(A_meta, out_meta);
      }
      
      at::Tensor A_;
      if (at::functionalization::impl::isFunctionalTensor(A)) {
        at::functionalization::impl::sync(A);
        A_ = at::functionalization::impl::from_functional_tensor(A);
      } else {
        A_ = A;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || A.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(A))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::linalg_det_out::call(A_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_det::call(A_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_ldl_factor_ex_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool hermitian, bool check_errors, at::Tensor & LD, at::Tensor & pivots, at::Tensor & info) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto LD_meta = to_meta(LD);
        auto pivots_meta = to_meta(pivots);
        auto info_meta = to_meta(info);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_ldl_factor_ex_out::call(self_meta, hermitian, check_errors, LD_meta, pivots_meta, info_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor LD_;
      if (at::functionalization::impl::isFunctionalTensor(LD)) {
        at::functionalization::impl::sync(LD);
        LD_ = at::functionalization::impl::from_functional_tensor(LD);
      } else {
        LD_ = LD;
      }
      
      at::Tensor pivots_;
      if (at::functionalization::impl::isFunctionalTensor(pivots)) {
        at::functionalization::impl::sync(pivots);
        pivots_ = at::functionalization::impl::from_functional_tensor(pivots);
      } else {
        pivots_ = pivots;
      }
      
      at::Tensor info_;
      if (at::functionalization::impl::isFunctionalTensor(info)) {
        at::functionalization::impl::sync(info);
        info_ = at::functionalization::impl::from_functional_tensor(info);
      } else {
        info_ = info;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(LD) && at::functionalization::impl::isFunctionalTensor(pivots) && at::functionalization::impl::isFunctionalTensor(info))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::linalg_ldl_factor_ex_out::call(self_, hermitian, check_errors, LD_, pivots_, info_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(LD, pivots, info);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_ldl_factor_ex::call(self_, hermitian, check_errors);
        }
        auto LD_inner = at::functionalization::impl::from_functional_tensor(LD);
        at::functionalization::impl::replace_(LD, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(LD);
        at::functionalization::impl::sync(LD);
        auto LD_inner_updated = at::functionalization::impl::from_functional_tensor(LD);
        at::functionalization::impl::propagate_xla_data_direct(LD_inner, LD_inner_updated);
        auto pivots_inner = at::functionalization::impl::from_functional_tensor(pivots);
        at::functionalization::impl::replace_(pivots, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(pivots);
        at::functionalization::impl::sync(pivots);
        auto pivots_inner_updated = at::functionalization::impl::from_functional_tensor(pivots);
        at::functionalization::impl::propagate_xla_data_direct(pivots_inner, pivots_inner_updated);
        auto info_inner = at::functionalization::impl::from_functional_tensor(info);
        at::functionalization::impl::replace_(info, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(info);
        at::functionalization::impl::sync(info);
        auto info_inner_updated = at::functionalization::impl::from_functional_tensor(info);
        at::functionalization::impl::propagate_xla_data_direct(info_inner, info_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(LD, pivots, info);
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> linalg_ldl_factor_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool hermitian, at::Tensor & LD, at::Tensor & pivots) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto LD_meta = to_meta(LD);
        auto pivots_meta = to_meta(pivots);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_ldl_factor_out::call(self_meta, hermitian, LD_meta, pivots_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor LD_;
      if (at::functionalization::impl::isFunctionalTensor(LD)) {
        at::functionalization::impl::sync(LD);
        LD_ = at::functionalization::impl::from_functional_tensor(LD);
      } else {
        LD_ = LD;
      }
      
      at::Tensor pivots_;
      if (at::functionalization::impl::isFunctionalTensor(pivots)) {
        at::functionalization::impl::sync(pivots);
        pivots_ = at::functionalization::impl::from_functional_tensor(pivots);
      } else {
        pivots_ = pivots;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(LD) && at::functionalization::impl::isFunctionalTensor(pivots))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::linalg_ldl_factor_out::call(self_, hermitian, LD_, pivots_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(LD, pivots);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_ldl_factor::call(self_, hermitian);
        }
        auto LD_inner = at::functionalization::impl::from_functional_tensor(LD);
        at::functionalization::impl::replace_(LD, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(LD);
        at::functionalization::impl::sync(LD);
        auto LD_inner_updated = at::functionalization::impl::from_functional_tensor(LD);
        at::functionalization::impl::propagate_xla_data_direct(LD_inner, LD_inner_updated);
        auto pivots_inner = at::functionalization::impl::from_functional_tensor(pivots);
        at::functionalization::impl::replace_(pivots, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(pivots);
        at::functionalization::impl::sync(pivots);
        auto pivots_inner_updated = at::functionalization::impl::from_functional_tensor(pivots);
        at::functionalization::impl::propagate_xla_data_direct(pivots_inner, pivots_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(LD, pivots);
      }
    }

    at::Tensor & linalg_ldl_solve_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto LD_meta = to_meta(LD);
        auto pivots_meta = to_meta(pivots);
        auto B_meta = to_meta(B);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_ldl_solve_out::call(LD_meta, pivots_meta, B_meta, hermitian, out_meta);
      }
      
      at::Tensor LD_;
      if (at::functionalization::impl::isFunctionalTensor(LD)) {
        at::functionalization::impl::sync(LD);
        LD_ = at::functionalization::impl::from_functional_tensor(LD);
      } else {
        LD_ = LD;
      }
      
      at::Tensor pivots_;
      if (at::functionalization::impl::isFunctionalTensor(pivots)) {
        at::functionalization::impl::sync(pivots);
        pivots_ = at::functionalization::impl::from_functional_tensor(pivots);
      } else {
        pivots_ = pivots;
      }
      
      at::Tensor B_;
      if (at::functionalization::impl::isFunctionalTensor(B)) {
        at::functionalization::impl::sync(B);
        B_ = at::functionalization::impl::from_functional_tensor(B);
      } else {
        B_ = B;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || LD.device().type() == c10::DeviceType::XLA || pivots.device().type() == c10::DeviceType::XLA || B.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(LD) || at::functionalization::impl::isFunctionalTensor(pivots) || at::functionalization::impl::isFunctionalTensor(B))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::linalg_ldl_solve_out::call(LD_, pivots_, B_, hermitian, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_ldl_solve::call(LD_, pivots_, B_, hermitian);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> linalg_lstsq_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & b, ::std::optional<double> rcond, ::std::optional<c10::string_view> driver, at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto b_meta = to_meta(b);
        auto solution_meta = to_meta(solution);
        auto residuals_meta = to_meta(residuals);
        auto rank_meta = to_meta(rank);
        auto singular_values_meta = to_meta(singular_values);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_lstsq_out::call(self_meta, b_meta, rcond, driver, solution_meta, residuals_meta, rank_meta, singular_values_meta);
      }
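      // The leading `false &&` above compiles the meta-tensor reference run out for this op;
      // when it is enabled, AutoDispatchSkipFunctionalize keeps the meta call from re-entering
      // this wrapper, and the ExcludeDispatchKeyGuard drops the keys in
      // exclude_keys_for_meta_dispatch while dispatching to the meta kernels.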
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor b_;
      if (at::functionalization::impl::isFunctionalTensor(b)) {
        at::functionalization::impl::sync(b);
        b_ = at::functionalization::impl::from_functional_tensor(b);
      } else {
        b_ = b;
      }
      
      at::Tensor solution_;
      if (at::functionalization::impl::isFunctionalTensor(solution)) {
        at::functionalization::impl::sync(solution);
        solution_ = at::functionalization::impl::from_functional_tensor(solution);
      } else {
        solution_ = solution;
      }
      
      at::Tensor residuals_;
      if (at::functionalization::impl::isFunctionalTensor(residuals)) {
        at::functionalization::impl::sync(residuals);
        residuals_ = at::functionalization::impl::from_functional_tensor(residuals);
      } else {
        residuals_ = residuals;
      }
      
      at::Tensor rank_;
      if (at::functionalization::impl::isFunctionalTensor(rank)) {
        at::functionalization::impl::sync(rank);
        rank_ = at::functionalization::impl::from_functional_tensor(rank);
      } else {
        rank_ = rank;
      }
      
      at::Tensor singular_values_;
      if (at::functionalization::impl::isFunctionalTensor(singular_values)) {
        at::functionalization::impl::sync(singular_values);
        singular_values_ = at::functionalization::impl::from_functional_tensor(singular_values);
      } else {
        singular_values_ = singular_values;
      }
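      // Unwrapping pattern: sync() first applies any pending updates queued on a functional
      // tensor, and from_functional_tensor() then extracts the plain inner tensor that the
      // redispatched op operates on; non-functional arguments pass through unchanged.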
      if (!(true && at::functionalization::impl::isFunctionalTensor(solution) && at::functionalization::impl::isFunctionalTensor(residuals) && at::functionalization::impl::isFunctionalTensor(rank) && at::functionalization::impl::isFunctionalTensor(singular_values))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || b.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(b))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::linalg_lstsq_out::call(self_, b_, rcond, driver, solution_, residuals_, rank_, singular_values_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(solution, residuals, rank, singular_values);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_lstsq::call(self_, b_, rcond, driver);
        }
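        // Write-back (roughly): replace_() swaps the freshly computed value into each output
        // wrapper, commit_update() records the mutation for the wrapper's alias set, sync()
        // applies it, and propagate_xla_data_direct() carries backend-specific (XLA) data from
        // the previous inner tensor over to the updated one.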
        auto solution_inner = at::functionalization::impl::from_functional_tensor(solution);
        at::functionalization::impl::replace_(solution, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(solution);
        at::functionalization::impl::sync(solution);
        auto solution_inner_updated = at::functionalization::impl::from_functional_tensor(solution);
        at::functionalization::impl::propagate_xla_data_direct(solution_inner, solution_inner_updated);
        auto residuals_inner = at::functionalization::impl::from_functional_tensor(residuals);
        at::functionalization::impl::replace_(residuals, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(residuals);
        at::functionalization::impl::sync(residuals);
        auto residuals_inner_updated = at::functionalization::impl::from_functional_tensor(residuals);
        at::functionalization::impl::propagate_xla_data_direct(residuals_inner, residuals_inner_updated);
        auto rank_inner = at::functionalization::impl::from_functional_tensor(rank);
        at::functionalization::impl::replace_(rank, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(rank);
        at::functionalization::impl::sync(rank);
        auto rank_inner_updated = at::functionalization::impl::from_functional_tensor(rank);
        at::functionalization::impl::propagate_xla_data_direct(rank_inner, rank_inner_updated);
        auto singular_values_inner = at::functionalization::impl::from_functional_tensor(singular_values);
        at::functionalization::impl::replace_(singular_values, std::get<3>(tmp_output));
        at::functionalization::impl::commit_update(singular_values);
        at::functionalization::impl::sync(singular_values);
        auto singular_values_inner_updated = at::functionalization::impl::from_functional_tensor(singular_values);
        at::functionalization::impl::propagate_xla_data_direct(singular_values_inner, singular_values_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(solution, residuals, rank, singular_values);
      }
    }

    at::Tensor & linalg_matmul_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_matmul_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
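      // Branch selection: when `out` is not a functional tensor we either hard-error (a
      // functional input would otherwise mutate a non-functional output) or simply redispatch
      // to the original out= op; XLA tensors are exempt from the error because cross-device
      // copies such as cpu_tensor.copy_(xla_tensor) are legal.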
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::linalg_matmul_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_matmul::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & linalg_vecdot_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & y, int64_t dim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto x_meta = to_meta(x);
        auto y_meta = to_meta(y);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_vecdot_out::call(x_meta, y_meta, dim, out_meta);
      }
      
      at::Tensor x_;
      if (at::functionalization::impl::isFunctionalTensor(x)) {
        at::functionalization::impl::sync(x);
        x_ = at::functionalization::impl::from_functional_tensor(x);
      } else {
        x_ = x;
      }
      
      at::Tensor y_;
      if (at::functionalization::impl::isFunctionalTensor(y)) {
        at::functionalization::impl::sync(y);
        y_ = at::functionalization::impl::from_functional_tensor(y);
      } else {
        y_ = y;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || x.device().type() == c10::DeviceType::XLA || y.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(x) || at::functionalization::impl::isFunctionalTensor(y))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::linalg_vecdot_out::call(x_, y_, dim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_vecdot::call(x_, y_, dim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & linalg_matrix_exp_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_matrix_exp_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::linalg_matrix_exp_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_matrix_exp::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
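    // Naming note: each wrapper name appears to be the base op name followed by `_out_` and the
    // overload of the out= op it wraps, e.g. `linalg_lstsq.out` -> linalg_lstsq_out_out,
    // `_linalg_slogdet.sign` -> _linalg_slogdet_out_sign, `linalg_inv_ex.inverse` ->
    // linalg_inv_ex_out_inverse.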

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _linalg_slogdet_out_sign(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet, at::Tensor & LU, at::Tensor & pivots) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto A_meta = to_meta(A);
        auto sign_meta = to_meta(sign);
        auto logabsdet_meta = to_meta(logabsdet);
        auto LU_meta = to_meta(LU);
        auto pivots_meta = to_meta(pivots);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_linalg_slogdet_sign::call(A_meta, sign_meta, logabsdet_meta, LU_meta, pivots_meta);
      }
      
      at::Tensor A_;
      if (at::functionalization::impl::isFunctionalTensor(A)) {
        at::functionalization::impl::sync(A);
        A_ = at::functionalization::impl::from_functional_tensor(A);
      } else {
        A_ = A;
      }
      
      at::Tensor sign_;
      if (at::functionalization::impl::isFunctionalTensor(sign)) {
        at::functionalization::impl::sync(sign);
        sign_ = at::functionalization::impl::from_functional_tensor(sign);
      } else {
        sign_ = sign;
      }
      
      at::Tensor logabsdet_;
      if (at::functionalization::impl::isFunctionalTensor(logabsdet)) {
        at::functionalization::impl::sync(logabsdet);
        logabsdet_ = at::functionalization::impl::from_functional_tensor(logabsdet);
      } else {
        logabsdet_ = logabsdet;
      }
      
      at::Tensor LU_;
      if (at::functionalization::impl::isFunctionalTensor(LU)) {
        at::functionalization::impl::sync(LU);
        LU_ = at::functionalization::impl::from_functional_tensor(LU);
      } else {
        LU_ = LU;
      }
      
      at::Tensor pivots_;
      if (at::functionalization::impl::isFunctionalTensor(pivots)) {
        at::functionalization::impl::sync(pivots);
        pivots_ = at::functionalization::impl::from_functional_tensor(pivots);
      } else {
        pivots_ = pivots;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(sign) && at::functionalization::impl::isFunctionalTensor(logabsdet) && at::functionalization::impl::isFunctionalTensor(LU) && at::functionalization::impl::isFunctionalTensor(pivots))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || A.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(A))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::_linalg_slogdet_sign::call(A_, sign_, logabsdet_, LU_, pivots_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(sign, logabsdet, LU, pivots);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_linalg_slogdet::call(A_);
        }
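        // For multi-output ops the write-back below is unrolled once per output, in the order
        // the outputs appear in the schema (sign, logabsdet, LU, pivots here).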
        auto sign_inner = at::functionalization::impl::from_functional_tensor(sign);
        at::functionalization::impl::replace_(sign, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(sign);
        at::functionalization::impl::sync(sign);
        auto sign_inner_updated = at::functionalization::impl::from_functional_tensor(sign);
        at::functionalization::impl::propagate_xla_data_direct(sign_inner, sign_inner_updated);
        auto logabsdet_inner = at::functionalization::impl::from_functional_tensor(logabsdet);
        at::functionalization::impl::replace_(logabsdet, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(logabsdet);
        at::functionalization::impl::sync(logabsdet);
        auto logabsdet_inner_updated = at::functionalization::impl::from_functional_tensor(logabsdet);
        at::functionalization::impl::propagate_xla_data_direct(logabsdet_inner, logabsdet_inner_updated);
        auto LU_inner = at::functionalization::impl::from_functional_tensor(LU);
        at::functionalization::impl::replace_(LU, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(LU);
        at::functionalization::impl::sync(LU);
        auto LU_inner_updated = at::functionalization::impl::from_functional_tensor(LU);
        at::functionalization::impl::propagate_xla_data_direct(LU_inner, LU_inner_updated);
        auto pivots_inner = at::functionalization::impl::from_functional_tensor(pivots);
        at::functionalization::impl::replace_(pivots, std::get<3>(tmp_output));
        at::functionalization::impl::commit_update(pivots);
        at::functionalization::impl::sync(pivots);
        auto pivots_inner_updated = at::functionalization::impl::from_functional_tensor(pivots);
        at::functionalization::impl::propagate_xla_data_direct(pivots_inner, pivots_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(sign, logabsdet, LU, pivots);
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> linalg_slogdet_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto A_meta = to_meta(A);
        auto sign_meta = to_meta(sign);
        auto logabsdet_meta = to_meta(logabsdet);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_slogdet_out::call(A_meta, sign_meta, logabsdet_meta);
      }
      
      at::Tensor A_;
      if (at::functionalization::impl::isFunctionalTensor(A)) {
        at::functionalization::impl::sync(A);
        A_ = at::functionalization::impl::from_functional_tensor(A);
      } else {
        A_ = A;
      }
      
      at::Tensor sign_;
      if (at::functionalization::impl::isFunctionalTensor(sign)) {
        at::functionalization::impl::sync(sign);
        sign_ = at::functionalization::impl::from_functional_tensor(sign);
      } else {
        sign_ = sign;
      }
      
      at::Tensor logabsdet_;
      if (at::functionalization::impl::isFunctionalTensor(logabsdet)) {
        at::functionalization::impl::sync(logabsdet);
        logabsdet_ = at::functionalization::impl::from_functional_tensor(logabsdet);
      } else {
        logabsdet_ = logabsdet;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(sign) && at::functionalization::impl::isFunctionalTensor(logabsdet))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || A.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(A))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::linalg_slogdet_out::call(A_, sign_, logabsdet_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(sign, logabsdet);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_slogdet::call(A_);
        }
        auto sign_inner = at::functionalization::impl::from_functional_tensor(sign);
        at::functionalization::impl::replace_(sign, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(sign);
        at::functionalization::impl::sync(sign);
        auto sign_inner_updated = at::functionalization::impl::from_functional_tensor(sign);
        at::functionalization::impl::propagate_xla_data_direct(sign_inner, sign_inner_updated);
        auto logabsdet_inner = at::functionalization::impl::from_functional_tensor(logabsdet);
        at::functionalization::impl::replace_(logabsdet, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(logabsdet);
        at::functionalization::impl::sync(logabsdet);
        auto logabsdet_inner_updated = at::functionalization::impl::from_functional_tensor(logabsdet);
        at::functionalization::impl::propagate_xla_data_direct(logabsdet_inner, logabsdet_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(sign, logabsdet);
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> slogdet_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & sign, at::Tensor & logabsdet) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto sign_meta = to_meta(sign);
        auto logabsdet_meta = to_meta(logabsdet);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::slogdet_out::call(self_meta, sign_meta, logabsdet_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor sign_;
      if (at::functionalization::impl::isFunctionalTensor(sign)) {
        at::functionalization::impl::sync(sign);
        sign_ = at::functionalization::impl::from_functional_tensor(sign);
      } else {
        sign_ = sign;
      }
      
      at::Tensor logabsdet_;
      if (at::functionalization::impl::isFunctionalTensor(logabsdet)) {
        at::functionalization::impl::sync(logabsdet);
        logabsdet_ = at::functionalization::impl::from_functional_tensor(logabsdet);
      } else {
        logabsdet_ = logabsdet;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(sign) && at::functionalization::impl::isFunctionalTensor(logabsdet))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::slogdet_out::call(self_, sign_, logabsdet_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(sign, logabsdet);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::slogdet::call(self_);
        }
        auto sign_inner = at::functionalization::impl::from_functional_tensor(sign);
        at::functionalization::impl::replace_(sign, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(sign);
        at::functionalization::impl::sync(sign);
        auto sign_inner_updated = at::functionalization::impl::from_functional_tensor(sign);
        at::functionalization::impl::propagate_xla_data_direct(sign_inner, sign_inner_updated);
        auto logabsdet_inner = at::functionalization::impl::from_functional_tensor(logabsdet);
        at::functionalization::impl::replace_(logabsdet, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(logabsdet);
        at::functionalization::impl::sync(logabsdet);
        auto logabsdet_inner_updated = at::functionalization::impl::from_functional_tensor(logabsdet);
        at::functionalization::impl::propagate_xla_data_direct(logabsdet_inner, logabsdet_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(sign, logabsdet);
      }
    }
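    // Note that both branches of these wrappers return the caller-visible output arguments
    // themselves (e.g. `sign` and `logabsdet` above), not the unwrapped `*_` copies, so callers
    // keep holding their functional wrappers.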

    ::std::tuple<at::Tensor &,at::Tensor &> linalg_eig_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & eigenvalues, at::Tensor & eigenvectors) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto eigenvalues_meta = to_meta(eigenvalues);
        auto eigenvectors_meta = to_meta(eigenvectors);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_eig_out::call(self_meta, eigenvalues_meta, eigenvectors_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor eigenvalues_;
      if (at::functionalization::impl::isFunctionalTensor(eigenvalues)) {
        at::functionalization::impl::sync(eigenvalues);
        eigenvalues_ = at::functionalization::impl::from_functional_tensor(eigenvalues);
      } else {
        eigenvalues_ = eigenvalues;
      }
      
      at::Tensor eigenvectors_;
      if (at::functionalization::impl::isFunctionalTensor(eigenvectors)) {
        at::functionalization::impl::sync(eigenvectors);
        eigenvectors_ = at::functionalization::impl::from_functional_tensor(eigenvectors);
      } else {
        eigenvectors_ = eigenvectors;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(eigenvalues) && at::functionalization::impl::isFunctionalTensor(eigenvectors))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::linalg_eig_out::call(self_, eigenvalues_, eigenvectors_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(eigenvalues, eigenvectors);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_eig::call(self_);
        }
        auto eigenvalues_inner = at::functionalization::impl::from_functional_tensor(eigenvalues);
        at::functionalization::impl::replace_(eigenvalues, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(eigenvalues);
        at::functionalization::impl::sync(eigenvalues);
        auto eigenvalues_inner_updated = at::functionalization::impl::from_functional_tensor(eigenvalues);
        at::functionalization::impl::propagate_xla_data_direct(eigenvalues_inner, eigenvalues_inner_updated);
        auto eigenvectors_inner = at::functionalization::impl::from_functional_tensor(eigenvectors);
        at::functionalization::impl::replace_(eigenvectors, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(eigenvectors);
        at::functionalization::impl::sync(eigenvectors);
        auto eigenvectors_inner_updated = at::functionalization::impl::from_functional_tensor(eigenvectors);
        at::functionalization::impl::propagate_xla_data_direct(eigenvectors_inner, eigenvectors_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(eigenvalues, eigenvectors);
      }
    }

    at::Tensor & linalg_eigvals_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_eigvals_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::linalg_eigvals_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_eigvals::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> _linalg_eigh_out_eigenvalues(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, c10::string_view UPLO, bool compute_v, at::Tensor & eigenvalues, at::Tensor & eigenvectors) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto A_meta = to_meta(A);
        auto eigenvalues_meta = to_meta(eigenvalues);
        auto eigenvectors_meta = to_meta(eigenvectors);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_linalg_eigh_eigenvalues::call(A_meta, UPLO, compute_v, eigenvalues_meta, eigenvectors_meta);
      }
      
      at::Tensor A_;
      if (at::functionalization::impl::isFunctionalTensor(A)) {
        at::functionalization::impl::sync(A);
        A_ = at::functionalization::impl::from_functional_tensor(A);
      } else {
        A_ = A;
      }
      
      at::Tensor eigenvalues_;
      if (at::functionalization::impl::isFunctionalTensor(eigenvalues)) {
        at::functionalization::impl::sync(eigenvalues);
        eigenvalues_ = at::functionalization::impl::from_functional_tensor(eigenvalues);
      } else {
        eigenvalues_ = eigenvalues;
      }
      
      at::Tensor eigenvectors_;
      if (at::functionalization::impl::isFunctionalTensor(eigenvectors)) {
        at::functionalization::impl::sync(eigenvectors);
        eigenvectors_ = at::functionalization::impl::from_functional_tensor(eigenvectors);
      } else {
        eigenvectors_ = eigenvectors;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(eigenvalues) && at::functionalization::impl::isFunctionalTensor(eigenvectors))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || A.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(A))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::_linalg_eigh_eigenvalues::call(A_, UPLO, compute_v, eigenvalues_, eigenvectors_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(eigenvalues, eigenvectors);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_linalg_eigh::call(A_, UPLO, compute_v);
        }
        auto eigenvalues_inner = at::functionalization::impl::from_functional_tensor(eigenvalues);
        at::functionalization::impl::replace_(eigenvalues, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(eigenvalues);
        at::functionalization::impl::sync(eigenvalues);
        auto eigenvalues_inner_updated = at::functionalization::impl::from_functional_tensor(eigenvalues);
        at::functionalization::impl::propagate_xla_data_direct(eigenvalues_inner, eigenvalues_inner_updated);
        auto eigenvectors_inner = at::functionalization::impl::from_functional_tensor(eigenvectors);
        at::functionalization::impl::replace_(eigenvectors, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(eigenvectors);
        at::functionalization::impl::sync(eigenvectors);
        auto eigenvectors_inner_updated = at::functionalization::impl::from_functional_tensor(eigenvectors);
        at::functionalization::impl::propagate_xla_data_direct(eigenvectors_inner, eigenvectors_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(eigenvalues, eigenvectors);
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> linalg_eigh_out_eigvals(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view UPLO, at::Tensor & eigvals, at::Tensor & eigvecs) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto eigvals_meta = to_meta(eigvals);
        auto eigvecs_meta = to_meta(eigvecs);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_eigh_eigvals::call(self_meta, UPLO, eigvals_meta, eigvecs_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor eigvals_;
      if (at::functionalization::impl::isFunctionalTensor(eigvals)) {
        at::functionalization::impl::sync(eigvals);
        eigvals_ = at::functionalization::impl::from_functional_tensor(eigvals);
      } else {
        eigvals_ = eigvals;
      }
      
      at::Tensor eigvecs_;
      if (at::functionalization::impl::isFunctionalTensor(eigvecs)) {
        at::functionalization::impl::sync(eigvecs);
        eigvecs_ = at::functionalization::impl::from_functional_tensor(eigvecs);
      } else {
        eigvecs_ = eigvecs;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(eigvals) && at::functionalization::impl::isFunctionalTensor(eigvecs))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::linalg_eigh_eigvals::call(self_, UPLO, eigvals_, eigvecs_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(eigvals, eigvecs);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_eigh::call(self_, UPLO);
        }
        auto eigvals_inner = at::functionalization::impl::from_functional_tensor(eigvals);
        at::functionalization::impl::replace_(eigvals, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(eigvals);
        at::functionalization::impl::sync(eigvals);
        auto eigvals_inner_updated = at::functionalization::impl::from_functional_tensor(eigvals);
        at::functionalization::impl::propagate_xla_data_direct(eigvals_inner, eigvals_inner_updated);
        auto eigvecs_inner = at::functionalization::impl::from_functional_tensor(eigvecs);
        at::functionalization::impl::replace_(eigvecs, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(eigvecs);
        at::functionalization::impl::sync(eigvecs);
        auto eigvecs_inner_updated = at::functionalization::impl::from_functional_tensor(eigvecs);
        at::functionalization::impl::propagate_xla_data_direct(eigvecs_inner, eigvecs_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(eigvals, eigvecs);
      }
    }

    at::Tensor & linalg_eigvalsh_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view UPLO, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_eigvalsh_out::call(self_meta, UPLO, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::linalg_eigvalsh_out::call(self_, UPLO, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_eigvalsh::call(self_, UPLO);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & linalg_householder_product_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & tau, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto tau_meta = to_meta(tau);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_householder_product_out::call(input_meta, tau_meta, out_meta);
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      at::Tensor tau_;
      if (at::functionalization::impl::isFunctionalTensor(tau)) {
        at::functionalization::impl::sync(tau);
        tau_ = at::functionalization::impl::from_functional_tensor(tau);
      } else {
        tau_ = tau;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA || tau.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(tau))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::linalg_householder_product_out::call(input_, tau_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_householder_product::call(input_, tau_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> linalg_inv_ex_out_inverse(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool check_errors, at::Tensor & inverse, at::Tensor & info) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto A_meta = to_meta(A);
        auto inverse_meta = to_meta(inverse);
        auto info_meta = to_meta(info);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_inv_ex_inverse::call(A_meta, check_errors, inverse_meta, info_meta);
      }
      
      at::Tensor A_;
      if (at::functionalization::impl::isFunctionalTensor(A)) {
        at::functionalization::impl::sync(A);
        A_ = at::functionalization::impl::from_functional_tensor(A);
      } else {
        A_ = A;
      }
      
      at::Tensor inverse_;
      if (at::functionalization::impl::isFunctionalTensor(inverse)) {
        at::functionalization::impl::sync(inverse);
        inverse_ = at::functionalization::impl::from_functional_tensor(inverse);
      } else {
        inverse_ = inverse;
      }
      
      at::Tensor info_;
      if (at::functionalization::impl::isFunctionalTensor(info)) {
        at::functionalization::impl::sync(info);
        info_ = at::functionalization::impl::from_functional_tensor(info);
      } else {
        info_ = info;
      }
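      // Non-tensor arguments such as `check_errors` need no unwrapping; they are forwarded
      // unchanged to both the out= redispatch and the functional variant.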
      if (!(true && at::functionalization::impl::isFunctionalTensor(inverse) && at::functionalization::impl::isFunctionalTensor(info))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || A.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(A))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::linalg_inv_ex_inverse::call(A_, check_errors, inverse_, info_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(inverse, info);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_inv_ex::call(A_, check_errors);
        }
        auto inverse_inner = at::functionalization::impl::from_functional_tensor(inverse);
        at::functionalization::impl::replace_(inverse, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(inverse);
        at::functionalization::impl::sync(inverse);
        auto inverse_inner_updated = at::functionalization::impl::from_functional_tensor(inverse);
        at::functionalization::impl::propagate_xla_data_direct(inverse_inner, inverse_inner_updated);
        auto info_inner = at::functionalization::impl::from_functional_tensor(info);
        at::functionalization::impl::replace_(info, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(info);
        at::functionalization::impl::sync(info);
        auto info_inner_updated = at::functionalization::impl::from_functional_tensor(info);
        at::functionalization::impl::propagate_xla_data_direct(info_inner, info_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(inverse, info);
      }
    }

    at::Tensor & linalg_inv_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto A_meta = to_meta(A);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_inv_out::call(A_meta, out_meta);
      }
      
      at::Tensor A_;
      if (at::functionalization::impl::isFunctionalTensor(A)) {
        at::functionalization::impl::sync(A);
        A_ = at::functionalization::impl::from_functional_tensor(A);
      } else {
        A_ = A;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || A.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(A))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::linalg_inv_out::call(A_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_inv::call(A_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
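    // Roughly, and eliding the unwrapping and write-back machinery, the two branches in
    // linalg_inv_out_out above correspond to:
    //
    //   at::_ops::linalg_inv_out::call(A_, out_);          // non-functional path: mutates out_ in place
    //   at::Tensor tmp = at::_ops::linalg_inv::call(A_);   // functional path: compute, then reflect tmp into out
    //
    // Under functionalization the second form is used, so the storage behind `out` is not
    // written directly; the functional wrapper is updated to hold the freshly computed result.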

    at::Tensor & inverse_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::inverse_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::inverse_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::inverse::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & inner_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::inner_out::call(self_meta, other_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::inner_out::call(self_, other_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::inner::call(self_, other_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & outer_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto vec2_meta = to_meta(vec2);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::outer_out::call(self_meta, vec2_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor vec2_;
      if (at::functionalization::impl::isFunctionalTensor(vec2)) {
        at::functionalization::impl::sync(vec2);
        vec2_ = at::functionalization::impl::from_functional_tensor(vec2);
      } else {
        vec2_ = vec2;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || vec2.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(vec2))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::outer_out::call(self_, vec2_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::outer::call(self_, vec2_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & ger_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto vec2_meta = to_meta(vec2);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::ger_out::call(self_meta, vec2_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor vec2_;
      if (at::functionalization::impl::isFunctionalTensor(vec2)) {
        at::functionalization::impl::sync(vec2);
        vec2_ = at::functionalization::impl::from_functional_tensor(vec2);
      } else {
        vec2_ = vec2;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || vec2.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(vec2))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::ger_out::call(self_, vec2_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::ger::call(self_, vec2_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & linalg_norm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_norm_out::call(self_meta, ord, dim, keepdim, dtype, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::linalg_norm_out::call(self_, ord, dim, keepdim, dtype, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_norm::call(self_, ord, dim, keepdim, dtype);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
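    // Only Tensor (and optional Tensor) arguments go through the functionalization
    // unwrap above; non-tensor arguments such as ord, dim, keepdim and dtype are
    // forwarded to the redispatched call unchanged.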

    at::Tensor & linalg_norm_out_ord_str_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_norm_ord_str_out::call(self_meta, ord, dim, keepdim, dtype, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::linalg_norm_ord_str_out::call(self_, ord, dim, keepdim, dtype, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_norm_ord_str::call(self_, ord, dim, keepdim, dtype);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & linalg_vector_norm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_vector_norm_out::call(self_meta, ord, dim, keepdim, dtype, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::linalg_vector_norm_out::call(self_, ord, dim, keepdim, dtype, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_vector_norm::call(self_, ord, dim, keepdim, dtype);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & linalg_matrix_norm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_matrix_norm_out::call(self_meta, ord, dim, keepdim, dtype, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::linalg_matrix_norm_out::call(self_, ord, dim, keepdim, dtype, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_matrix_norm::call(self_, ord, dim, keepdim, dtype);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & linalg_matrix_norm_out_str_ord_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_matrix_norm_str_ord_out::call(self_meta, ord, dim, keepdim, dtype, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::linalg_matrix_norm_str_ord_out::call(self_, ord, dim, keepdim, dtype, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_matrix_norm_str_ord::call(self_, ord, dim, keepdim, dtype);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _linalg_svd_out_U(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool full_matrices, bool compute_uv, ::std::optional<c10::string_view> driver, at::Tensor & U, at::Tensor & S, at::Tensor & Vh) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto A_meta = to_meta(A);
        auto U_meta = to_meta(U);
        auto S_meta = to_meta(S);
        auto Vh_meta = to_meta(Vh);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_linalg_svd_U::call(A_meta, full_matrices, compute_uv, driver, U_meta, S_meta, Vh_meta);
      }
      
      at::Tensor A_;
      if (at::functionalization::impl::isFunctionalTensor(A)) {
        at::functionalization::impl::sync(A);
        A_ = at::functionalization::impl::from_functional_tensor(A);
      } else {
        A_ = A;
      }
      
      at::Tensor U_;
      if (at::functionalization::impl::isFunctionalTensor(U)) {
        at::functionalization::impl::sync(U);
        U_ = at::functionalization::impl::from_functional_tensor(U);
      } else {
        U_ = U;
      }
      
      at::Tensor S_;
      if (at::functionalization::impl::isFunctionalTensor(S)) {
        at::functionalization::impl::sync(S);
        S_ = at::functionalization::impl::from_functional_tensor(S);
      } else {
        S_ = S;
      }
      
      at::Tensor Vh_;
      if (at::functionalization::impl::isFunctionalTensor(Vh)) {
        at::functionalization::impl::sync(Vh);
        Vh_ = at::functionalization::impl::from_functional_tensor(Vh);
      } else {
        Vh_ = Vh;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(U) && at::functionalization::impl::isFunctionalTensor(S) && at::functionalization::impl::isFunctionalTensor(Vh))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || A.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(A))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::_linalg_svd_U::call(A_, full_matrices, compute_uv, driver, U_, S_, Vh_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(U, S, Vh);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_linalg_svd::call(A_, full_matrices, compute_uv, driver);
        }
        auto U_inner = at::functionalization::impl::from_functional_tensor(U);
        at::functionalization::impl::replace_(U, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(U);
        at::functionalization::impl::sync(U);
        auto U_inner_updated = at::functionalization::impl::from_functional_tensor(U);
        at::functionalization::impl::propagate_xla_data_direct(U_inner, U_inner_updated);
        auto S_inner = at::functionalization::impl::from_functional_tensor(S);
        at::functionalization::impl::replace_(S, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(S);
        at::functionalization::impl::sync(S);
        auto S_inner_updated = at::functionalization::impl::from_functional_tensor(S);
        at::functionalization::impl::propagate_xla_data_direct(S_inner, S_inner_updated);
        auto Vh_inner = at::functionalization::impl::from_functional_tensor(Vh);
        at::functionalization::impl::replace_(Vh, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(Vh);
        at::functionalization::impl::sync(Vh);
        auto Vh_inner_updated = at::functionalization::impl::from_functional_tensor(Vh);
        at::functionalization::impl::propagate_xla_data_direct(Vh_inner, Vh_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(U, S, Vh);
      }
    }
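    // For multi-output ops such as _linalg_svd, the functional variant returns a tuple
    // and each element is written back into its corresponding output wrapper
    // (std::get<0> -> U, std::get<1> -> S, std::get<2> -> Vh) with the same
    // replace_/commit_update/sync/propagate_xla_data_direct sequence used for single
    // outputs.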

    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_svd_out_U(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool full_matrices, ::std::optional<c10::string_view> driver, at::Tensor & U, at::Tensor & S, at::Tensor & Vh) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto A_meta = to_meta(A);
        auto U_meta = to_meta(U);
        auto S_meta = to_meta(S);
        auto Vh_meta = to_meta(Vh);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_svd_U::call(A_meta, full_matrices, driver, U_meta, S_meta, Vh_meta);
      }
      
      at::Tensor A_;
      if (at::functionalization::impl::isFunctionalTensor(A)) {
        at::functionalization::impl::sync(A);
        A_ = at::functionalization::impl::from_functional_tensor(A);
      } else {
        A_ = A;
      }
      
      at::Tensor U_;
      if (at::functionalization::impl::isFunctionalTensor(U)) {
        at::functionalization::impl::sync(U);
        U_ = at::functionalization::impl::from_functional_tensor(U);
      } else {
        U_ = U;
      }
      
      at::Tensor S_;
      if (at::functionalization::impl::isFunctionalTensor(S)) {
        at::functionalization::impl::sync(S);
        S_ = at::functionalization::impl::from_functional_tensor(S);
      } else {
        S_ = S;
      }
      
      at::Tensor Vh_;
      if (at::functionalization::impl::isFunctionalTensor(Vh)) {
        at::functionalization::impl::sync(Vh);
        Vh_ = at::functionalization::impl::from_functional_tensor(Vh);
      } else {
        Vh_ = Vh;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(U) && at::functionalization::impl::isFunctionalTensor(S) && at::functionalization::impl::isFunctionalTensor(Vh))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || A.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(A))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::linalg_svd_U::call(A_, full_matrices, driver, U_, S_, Vh_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(U, S, Vh);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_svd::call(A_, full_matrices, driver);
        }
        auto U_inner = at::functionalization::impl::from_functional_tensor(U);
        at::functionalization::impl::replace_(U, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(U);
        at::functionalization::impl::sync(U);
        auto U_inner_updated = at::functionalization::impl::from_functional_tensor(U);
        at::functionalization::impl::propagate_xla_data_direct(U_inner, U_inner_updated);
        auto S_inner = at::functionalization::impl::from_functional_tensor(S);
        at::functionalization::impl::replace_(S, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(S);
        at::functionalization::impl::sync(S);
        auto S_inner_updated = at::functionalization::impl::from_functional_tensor(S);
        at::functionalization::impl::propagate_xla_data_direct(S_inner, S_inner_updated);
        auto Vh_inner = at::functionalization::impl::from_functional_tensor(Vh);
        at::functionalization::impl::replace_(Vh, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(Vh);
        at::functionalization::impl::sync(Vh);
        auto Vh_inner_updated = at::functionalization::impl::from_functional_tensor(Vh);
        at::functionalization::impl::propagate_xla_data_direct(Vh_inner, Vh_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(U, S, Vh);
      }
    }

    at::Tensor & linalg_svdvals_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, ::std::optional<c10::string_view> driver, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto A_meta = to_meta(A);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_svdvals_out::call(A_meta, driver, out_meta);
      }
      
      at::Tensor A_;
      if (at::functionalization::impl::isFunctionalTensor(A)) {
        at::functionalization::impl::sync(A);
        A_ = at::functionalization::impl::from_functional_tensor(A);
      } else {
        A_ = A;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || A.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(A))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::linalg_svdvals_out::call(A_, driver, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_svdvals::call(A_, driver);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & linalg_cond_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_cond_out::call(self_meta, p, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::linalg_cond_out::call(self_, p, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_cond::call(self_, p);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & linalg_cond_out_p_str_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view p, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_cond_p_str_out::call(self_meta, p, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::linalg_cond_p_str_out::call(self_, p, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_cond_p_str::call(self_, p);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & linalg_pinv_out_atol_rtol_tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Tensor> & atol, const ::std::optional<at::Tensor> & rtol, bool hermitian, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto atol_meta = to_meta(atol);
        auto rtol_meta = to_meta(rtol);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_pinv_atol_rtol_tensor_out::call(self_meta, atol_meta, rtol_meta, hermitian, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      ::std::optional<at::Tensor> atol_;
      if (at::functionalization::impl::isFunctionalTensor(atol)) {
        at::functionalization::impl::sync(atol);
        atol_ = at::functionalization::impl::from_functional_tensor(atol);
      } else {
        atol_ = atol;
      }
      
      ::std::optional<at::Tensor> rtol_;
      if (at::functionalization::impl::isFunctionalTensor(rtol)) {
        at::functionalization::impl::sync(rtol);
        rtol_ = at::functionalization::impl::from_functional_tensor(rtol);
      } else {
        rtol_ = rtol;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(atol) || at::functionalization::impl::isFunctionalTensor(rtol))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::linalg_pinv_atol_rtol_tensor_out::call(self_, atol_, rtol_, hermitian, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_pinv_atol_rtol_tensor::call(self_, atol_, rtol_, hermitian);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
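    // Optional tensor arguments (atol and rtol above) are unwrapped into
    // ::std::optional<at::Tensor> just like required tensors, and they participate in
    // the functional-input check that guards against mutating a non-functional output.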

    at::Tensor & linalg_pinv_out_atol_rtol_float_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<double> atol, ::std::optional<double> rtol, bool hermitian, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_pinv_atol_rtol_float_out::call(self_meta, atol, rtol, hermitian, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::linalg_pinv_atol_rtol_float_out::call(self_, atol, rtol, hermitian, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_pinv_atol_rtol_float::call(self_, atol, rtol, hermitian);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & linalg_pinv_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double rcond, bool hermitian, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_pinv_out::call(self_meta, rcond, hermitian, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::linalg_pinv_out::call(self_, rcond, hermitian, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_pinv::call(self_, rcond, hermitian);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & linalg_pinv_out_out_rcond_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & rcond, bool hermitian, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto rcond_meta = to_meta(rcond);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_pinv_out_rcond_tensor::call(self_meta, rcond_meta, hermitian, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor rcond_;
      if (at::functionalization::impl::isFunctionalTensor(rcond)) {
        at::functionalization::impl::sync(rcond);
        rcond_ = at::functionalization::impl::from_functional_tensor(rcond);
      } else {
        rcond_ = rcond;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || rcond.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(rcond))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::linalg_pinv_out_rcond_tensor::call(self_, rcond_, hermitian, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_pinv_rcond_tensor::call(self_, rcond_, hermitian);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

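    // Multi-output variant of the same pattern: each out argument (result, LU, pivots, info) is unwrapped
    // and written back individually, and the functional fallback returns a tuple that is picked apart with
    // std::get<i>. A minimal sketch of the functional call it forwards to (signature taken from the call
    // inside the kernel below):
    //
    //   at::Tensor result, LU, pivots, info;
    //   std::tie(result, LU, pivots, info) =
    //       at::_ops::_linalg_solve_ex::call(A, B, /*left=*/true, /*check_errors=*/false);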
    ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _linalg_solve_ex_out_result(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors, at::Tensor & result, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto A_meta = to_meta(A);
        auto B_meta = to_meta(B);
        auto result_meta = to_meta(result);
        auto LU_meta = to_meta(LU);
        auto pivots_meta = to_meta(pivots);
        auto info_meta = to_meta(info);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_linalg_solve_ex_result::call(A_meta, B_meta, left, check_errors, result_meta, LU_meta, pivots_meta, info_meta);
      }
      
      at::Tensor A_;
      if (at::functionalization::impl::isFunctionalTensor(A)) {
        at::functionalization::impl::sync(A);
        A_ = at::functionalization::impl::from_functional_tensor(A);
      } else {
        A_ = A;
      }
      
      at::Tensor B_;
      if (at::functionalization::impl::isFunctionalTensor(B)) {
        at::functionalization::impl::sync(B);
        B_ = at::functionalization::impl::from_functional_tensor(B);
      } else {
        B_ = B;
      }
      
      at::Tensor result_;
      if (at::functionalization::impl::isFunctionalTensor(result)) {
        at::functionalization::impl::sync(result);
        result_ = at::functionalization::impl::from_functional_tensor(result);
      } else {
        result_ = result;
      }
      
      at::Tensor LU_;
      if (at::functionalization::impl::isFunctionalTensor(LU)) {
        at::functionalization::impl::sync(LU);
        LU_ = at::functionalization::impl::from_functional_tensor(LU);
      } else {
        LU_ = LU;
      }
      
      at::Tensor pivots_;
      if (at::functionalization::impl::isFunctionalTensor(pivots)) {
        at::functionalization::impl::sync(pivots);
        pivots_ = at::functionalization::impl::from_functional_tensor(pivots);
      } else {
        pivots_ = pivots;
      }
      
      at::Tensor info_;
      if (at::functionalization::impl::isFunctionalTensor(info)) {
        at::functionalization::impl::sync(info);
        info_ = at::functionalization::impl::from_functional_tensor(info);
      } else {
        info_ = info;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(result) && at::functionalization::impl::isFunctionalTensor(LU) && at::functionalization::impl::isFunctionalTensor(pivots) && at::functionalization::impl::isFunctionalTensor(info))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || A.device().type() == c10::DeviceType::XLA || B.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(A) || at::functionalization::impl::isFunctionalTensor(B))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::_linalg_solve_ex_result::call(A_, B_, left, check_errors, result_, LU_, pivots_, info_);
         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(result, LU, pivots, info);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_linalg_solve_ex::call(A_, B_, left, check_errors);
        }
        auto result_inner = at::functionalization::impl::from_functional_tensor(result);
        at::functionalization::impl::replace_(result, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(result);
        at::functionalization::impl::sync(result);
        auto result_inner_updated = at::functionalization::impl::from_functional_tensor(result);
        at::functionalization::impl::propagate_xla_data_direct(result_inner, result_inner_updated);
        auto LU_inner = at::functionalization::impl::from_functional_tensor(LU);
        at::functionalization::impl::replace_(LU, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(LU);
        at::functionalization::impl::sync(LU);
        auto LU_inner_updated = at::functionalization::impl::from_functional_tensor(LU);
        at::functionalization::impl::propagate_xla_data_direct(LU_inner, LU_inner_updated);
        auto pivots_inner = at::functionalization::impl::from_functional_tensor(pivots);
        at::functionalization::impl::replace_(pivots, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(pivots);
        at::functionalization::impl::sync(pivots);
        auto pivots_inner_updated = at::functionalization::impl::from_functional_tensor(pivots);
        at::functionalization::impl::propagate_xla_data_direct(pivots_inner, pivots_inner_updated);
        auto info_inner = at::functionalization::impl::from_functional_tensor(info);
        at::functionalization::impl::replace_(info, std::get<3>(tmp_output));
        at::functionalization::impl::commit_update(info);
        at::functionalization::impl::sync(info);
        auto info_inner_updated = at::functionalization::impl::from_functional_tensor(info);
        at::functionalization::impl::propagate_xla_data_direct(info_inner, info_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(result, LU, pivots, info);
      }
    }

    ::std::tuple<at::Tensor &,at::Tensor &> linalg_solve_ex_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors, at::Tensor & result, at::Tensor & info) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto A_meta = to_meta(A);
        auto B_meta = to_meta(B);
        auto result_meta = to_meta(result);
        auto info_meta = to_meta(info);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_solve_ex_out::call(A_meta, B_meta, left, check_errors, result_meta, info_meta);
      }
      
      at::Tensor A_;
      if (at::functionalization::impl::isFunctionalTensor(A)) {
        at::functionalization::impl::sync(A);
        A_ = at::functionalization::impl::from_functional_tensor(A);
      } else {
        A_ = A;
      }
      
      at::Tensor B_;
      if (at::functionalization::impl::isFunctionalTensor(B)) {
        at::functionalization::impl::sync(B);
        B_ = at::functionalization::impl::from_functional_tensor(B);
      } else {
        B_ = B;
      }
      
      at::Tensor result_;
      if (at::functionalization::impl::isFunctionalTensor(result)) {
        at::functionalization::impl::sync(result);
        result_ = at::functionalization::impl::from_functional_tensor(result);
      } else {
        result_ = result;
      }
      
      at::Tensor info_;
      if (at::functionalization::impl::isFunctionalTensor(info)) {
        at::functionalization::impl::sync(info);
        info_ = at::functionalization::impl::from_functional_tensor(info);
      } else {
        info_ = info;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(result) && at::functionalization::impl::isFunctionalTensor(info))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || A.device().type() == c10::DeviceType::XLA || B.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(A) || at::functionalization::impl::isFunctionalTensor(B))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::linalg_solve_ex_out::call(A_, B_, left, check_errors, result_, info_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(result, info);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_solve_ex::call(A_, B_, left, check_errors);
        }
        auto result_inner = at::functionalization::impl::from_functional_tensor(result);
        at::functionalization::impl::replace_(result, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(result);
        at::functionalization::impl::sync(result);
        auto result_inner_updated = at::functionalization::impl::from_functional_tensor(result);
        at::functionalization::impl::propagate_xla_data_direct(result_inner, result_inner_updated);
        auto info_inner = at::functionalization::impl::from_functional_tensor(info);
        at::functionalization::impl::replace_(info, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(info);
        at::functionalization::impl::sync(info);
        auto info_inner_updated = at::functionalization::impl::from_functional_tensor(info);
        at::functionalization::impl::propagate_xla_data_direct(info_inner, info_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(result, info);
      }
    }

    at::Tensor & linalg_solve_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, const at::Tensor & B, bool left, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto A_meta = to_meta(A);
        auto B_meta = to_meta(B);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_solve_out::call(A_meta, B_meta, left, out_meta);
      }
      
      at::Tensor A_;
      if (at::functionalization::impl::isFunctionalTensor(A)) {
        at::functionalization::impl::sync(A);
        A_ = at::functionalization::impl::from_functional_tensor(A);
      } else {
        A_ = A;
      }
      
      at::Tensor B_;
      if (at::functionalization::impl::isFunctionalTensor(B)) {
        at::functionalization::impl::sync(B);
        B_ = at::functionalization::impl::from_functional_tensor(B);
      } else {
        B_ = B;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || A.device().type() == c10::DeviceType::XLA || B.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(A) || at::functionalization::impl::isFunctionalTensor(B))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::linalg_solve_out::call(A_, B_, left, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_solve::call(A_, B_, left);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & linalg_tensorinv_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t ind, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_tensorinv_out::call(self_meta, ind, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::linalg_tensorinv_out::call(self_, ind, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_tensorinv::call(self_, ind);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & linalg_tensorsolve_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto other_meta = to_meta(other);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_tensorsolve_out::call(self_meta, other_meta, dims, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor other_;
      if (at::functionalization::impl::isFunctionalTensor(other)) {
        at::functionalization::impl::sync(other);
        other_ = at::functionalization::impl::from_functional_tensor(other);
      } else {
        other_ = other;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA || other.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::linalg_tensorsolve_out::call(self_, other_, dims, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_tensorsolve::call(self_, other_, dims);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

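    // Non-tensor arguments (here the c10::string_view `mode`; elsewhere doubles, bools and int lists) are
    // forwarded unchanged, since functionalization only needs to unwrap and re-wrap tensors. Sketch of the
    // functional call used in the wrapped branch below ("reduced" is the documented default mode):
    //
    //   auto [Q, R] = at::_ops::linalg_qr::call(A, /*mode=*/"reduced");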
    ::std::tuple<at::Tensor &,at::Tensor &> linalg_qr_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, c10::string_view mode, at::Tensor & Q, at::Tensor & R) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto A_meta = to_meta(A);
        auto Q_meta = to_meta(Q);
        auto R_meta = to_meta(R);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_qr_out::call(A_meta, mode, Q_meta, R_meta);
      }
      
      at::Tensor A_;
      if (at::functionalization::impl::isFunctionalTensor(A)) {
        at::functionalization::impl::sync(A);
        A_ = at::functionalization::impl::from_functional_tensor(A);
      } else {
        A_ = A;
      }
      
      at::Tensor Q_;
      if (at::functionalization::impl::isFunctionalTensor(Q)) {
        at::functionalization::impl::sync(Q);
        Q_ = at::functionalization::impl::from_functional_tensor(Q);
      } else {
        Q_ = Q;
      }
      
      at::Tensor R_;
      if (at::functionalization::impl::isFunctionalTensor(R)) {
        at::functionalization::impl::sync(R);
        R_ = at::functionalization::impl::from_functional_tensor(R);
      } else {
        R_ = R;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(Q) && at::functionalization::impl::isFunctionalTensor(R))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || A.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(A))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::linalg_qr_out::call(A_, mode, Q_, R_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(Q, R);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_qr::call(A_, mode);
        }
        auto Q_inner = at::functionalization::impl::from_functional_tensor(Q);
        at::functionalization::impl::replace_(Q, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(Q);
        at::functionalization::impl::sync(Q);
        auto Q_inner_updated = at::functionalization::impl::from_functional_tensor(Q);
        at::functionalization::impl::propagate_xla_data_direct(Q_inner, Q_inner_updated);
        auto R_inner = at::functionalization::impl::from_functional_tensor(R);
        at::functionalization::impl::replace_(R, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(R);
        at::functionalization::impl::sync(R);
        auto R_inner_updated = at::functionalization::impl::from_functional_tensor(R);
        at::functionalization::impl::propagate_xla_data_direct(R_inner, R_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(Q, R);
      }
    }

    at::Tensor & linalg_matrix_power_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_matrix_power_out::call(self_meta, n, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::linalg_matrix_power_out::call(self_, n, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_matrix_power::call(self_, n);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

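    // Optional tensor arguments (atol/rtol) go through the same isFunctionalTensor/sync unwrapping as plain
    // tensors; note that the XLA exemption below only inspects `input`, while the functional-tensor check
    // does cover atol and rtol. Sketch of the functional call being forwarded to (either optional may
    // simply be left empty):
    //
    //   at::Tensor rank = at::_ops::linalg_matrix_rank_atol_rtol_tensor::call(
    //       input, /*atol=*/::std::nullopt, /*rtol=*/::std::nullopt, /*hermitian=*/false);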
    at::Tensor & linalg_matrix_rank_out_atol_rtol_tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional<at::Tensor> & atol, const ::std::optional<at::Tensor> & rtol, bool hermitian, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto atol_meta = to_meta(atol);
        auto rtol_meta = to_meta(rtol);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_matrix_rank_atol_rtol_tensor_out::call(input_meta, atol_meta, rtol_meta, hermitian, out_meta);
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      ::std::optional<at::Tensor> atol_;
      if (at::functionalization::impl::isFunctionalTensor(atol)) {
        at::functionalization::impl::sync(atol);
        atol_ = at::functionalization::impl::from_functional_tensor(atol);
      } else {
        atol_ = atol;
      }
      
      ::std::optional<at::Tensor> rtol_;
      if (at::functionalization::impl::isFunctionalTensor(rtol)) {
        at::functionalization::impl::sync(rtol);
        rtol_ = at::functionalization::impl::from_functional_tensor(rtol);
      } else {
        rtol_ = rtol;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(atol) || at::functionalization::impl::isFunctionalTensor(rtol))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::linalg_matrix_rank_atol_rtol_tensor_out::call(input_, atol_, rtol_, hermitian, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_matrix_rank_atol_rtol_tensor::call(input_, atol_, rtol_, hermitian);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & linalg_matrix_rank_out_atol_rtol_float_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<double> atol, ::std::optional<double> rtol, bool hermitian, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_matrix_rank_atol_rtol_float_out::call(self_meta, atol, rtol, hermitian, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::linalg_matrix_rank_atol_rtol_float_out::call(self_, atol, rtol, hermitian, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_matrix_rank_atol_rtol_float::call(self_, atol, rtol, hermitian);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & linalg_matrix_rank_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double tol, bool hermitian, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_matrix_rank_out::call(self_meta, tol, hermitian, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::linalg_matrix_rank_out::call(self_, tol, hermitian, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_matrix_rank::call(self_, tol, hermitian);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & linalg_matrix_rank_out_out_tol_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & tol, bool hermitian, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto input_meta = to_meta(input);
        auto tol_meta = to_meta(tol);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_matrix_rank_out_tol_tensor::call(input_meta, tol_meta, hermitian, out_meta);
      }
      
      at::Tensor input_;
      if (at::functionalization::impl::isFunctionalTensor(input)) {
        at::functionalization::impl::sync(input);
        input_ = at::functionalization::impl::from_functional_tensor(input);
      } else {
        input_ = input;
      }
      
      at::Tensor tol_;
      if (at::functionalization::impl::isFunctionalTensor(tol)) {
        at::functionalization::impl::sync(tol);
        tol_ = at::functionalization::impl::from_functional_tensor(tol);
      } else {
        tol_ = tol;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || input.device().type() == c10::DeviceType::XLA || tol.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(tol))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::linalg_matrix_rank_out_tol_tensor::call(input_, tol_, hermitian, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_matrix_rank_tol_tensor::call(input_, tol_, hermitian);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

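    // TensorList arguments are materialized into a ::std::vector<at::Tensor> (either via .vec() or by
    // unwrapping each element), and the XLA exemption degenerates to `!(false)` because list elements are
    // not device-checked here. Sketch of the functional call being forwarded to (a, b, c are illustrative
    // 2-D tensors):
    //
    //   ::std::vector<at::Tensor> mats = {a, b, c};
    //   at::Tensor prod = at::_ops::linalg_multi_dot::call(mats);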
    at::Tensor & linalg_multi_dot_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto tensors_meta = to_meta(tensors);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::linalg_multi_dot_out::call(tensors_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> tensors_;
      if (at::functionalization::impl::isFunctionalTensor(tensors)) {
        at::functionalization::impl::sync(tensors);
        tensors_ = at::functionalization::impl::from_functional_tensor(tensors);
      } else {
        tensors_ = tensors.vec();
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(tensors))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::linalg_multi_dot_out::call(tensors_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::linalg_multi_dot::call(tensors_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

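    // The `_test_*` operators below are internal test ops; they mainly exist to exercise less common
    // argument types (OptionalIntArrayRef, ::std::optional<ArrayRef<double>>) and warning/dispatch
    // behaviour through the same functionalization template. E.g. (sketch; `values` is an illustrative
    // tensor), the optional int list can be passed as ::std::nullopt or as a concrete list:
    //
    //   ::std::vector<int64_t> addends = {1, 2, 3};
    //   at::Tensor summed = at::_ops::_test_optional_intlist::call(values, addends);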
    at::Tensor & _test_optional_intlist_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & values, at::OptionalIntArrayRef addends, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto values_meta = to_meta(values);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_test_optional_intlist_out::call(values_meta, addends, out_meta);
      }
      
      at::Tensor values_;
      if (at::functionalization::impl::isFunctionalTensor(values)) {
        at::functionalization::impl::sync(values);
        values_ = at::functionalization::impl::from_functional_tensor(values);
      } else {
        values_ = values;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || values.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(values))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_test_optional_intlist_out::call(values_, addends, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_test_optional_intlist::call(values_, addends);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _test_optional_filled_intlist_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & values, at::OptionalIntArrayRef addends, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto values_meta = to_meta(values);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_test_optional_filled_intlist_out::call(values_meta, addends, out_meta);
      }
      
      at::Tensor values_;
      if (at::functionalization::impl::isFunctionalTensor(values)) {
        at::functionalization::impl::sync(values);
        values_ = at::functionalization::impl::from_functional_tensor(values);
      } else {
        values_ = values;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || values.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(values))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_test_optional_filled_intlist_out::call(values_, addends, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_test_optional_filled_intlist::call(values_, addends);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _test_optional_floatlist_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & values, ::std::optional<at::ArrayRef<double>> addends, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto values_meta = to_meta(values);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_test_optional_floatlist_out::call(values_meta, addends, out_meta);
      }
      
      at::Tensor values_;
      if (at::functionalization::impl::isFunctionalTensor(values)) {
        at::functionalization::impl::sync(values);
        values_ = at::functionalization::impl::from_functional_tensor(values);
      } else {
        values_ = values;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || values.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(values))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_test_optional_floatlist_out::call(values_, addends, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_test_optional_floatlist::call(values_, addends);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _test_warn_in_autograd_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_test_warn_in_autograd_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_test_warn_in_autograd_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_test_warn_in_autograd::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _test_autograd_multiple_dispatch_out_fullcoverage_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_test_autograd_multiple_dispatch_fullcoverage_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_test_autograd_multiple_dispatch_fullcoverage_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_test_autograd_multiple_dispatch_fullcoverage::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _test_autograd_multiple_dispatch_view_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_test_autograd_multiple_dispatch_view_copy_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_test_autograd_multiple_dispatch_view_copy_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_test_autograd_multiple_dispatch_view_copy::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

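    // For ops with optional Tensor arguments (here: lengths, indices, offsets), the same
    // sync/unwrap step is applied to each ::std::optional<at::Tensor>, and every optional
    // input participates in the functional-input check guarding the mutation of `out`.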
    at::Tensor & segment_reduce_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & data, c10::string_view reduce, const ::std::optional<at::Tensor> & lengths, const ::std::optional<at::Tensor> & indices, const ::std::optional<at::Tensor> & offsets, int64_t axis, bool unsafe, const ::std::optional<at::Scalar> & initial, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto data_meta = to_meta(data);
        auto lengths_meta = to_meta(lengths);
        auto indices_meta = to_meta(indices);
        auto offsets_meta = to_meta(offsets);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::segment_reduce_out::call(data_meta, reduce, lengths_meta, indices_meta, offsets_meta, axis, unsafe, initial, out_meta);
      }
      
      at::Tensor data_;
      if (at::functionalization::impl::isFunctionalTensor(data)) {
        at::functionalization::impl::sync(data);
        data_ = at::functionalization::impl::from_functional_tensor(data);
      } else {
        data_ = data;
      }
      
      ::std::optional<at::Tensor> lengths_;
      if (at::functionalization::impl::isFunctionalTensor(lengths)) {
        at::functionalization::impl::sync(lengths);
        lengths_ = at::functionalization::impl::from_functional_tensor(lengths);
      } else {
        lengths_ = lengths;
      }
      
      ::std::optional<at::Tensor> indices_;
      if (at::functionalization::impl::isFunctionalTensor(indices)) {
        at::functionalization::impl::sync(indices);
        indices_ = at::functionalization::impl::from_functional_tensor(indices);
      } else {
        indices_ = indices;
      }
      
      ::std::optional<at::Tensor> offsets_;
      if (at::functionalization::impl::isFunctionalTensor(offsets)) {
        at::functionalization::impl::sync(offsets);
        offsets_ = at::functionalization::impl::from_functional_tensor(offsets);
      } else {
        offsets_ = offsets;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || data.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(data) || at::functionalization::impl::isFunctionalTensor(lengths) || at::functionalization::impl::isFunctionalTensor(indices) || at::functionalization::impl::isFunctionalTensor(offsets))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::segment_reduce_out::call(data_, reduce, lengths_, indices_, offsets_, axis, unsafe, initial, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::segment_reduce::call(data_, reduce, lengths_, indices_, offsets_, axis, unsafe, initial);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _segment_reduce_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & output, const at::Tensor & data, c10::string_view reduce, const ::std::optional<at::Tensor> & lengths, const ::std::optional<at::Tensor> & offsets, int64_t axis, const ::std::optional<at::Scalar> & initial, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto grad_meta = to_meta(grad);
        auto output_meta = to_meta(output);
        auto data_meta = to_meta(data);
        auto lengths_meta = to_meta(lengths);
        auto offsets_meta = to_meta(offsets);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_segment_reduce_backward_out::call(grad_meta, output_meta, data_meta, reduce, lengths_meta, offsets_meta, axis, initial, out_meta);
      }
      
      at::Tensor grad_;
      if (at::functionalization::impl::isFunctionalTensor(grad)) {
        at::functionalization::impl::sync(grad);
        grad_ = at::functionalization::impl::from_functional_tensor(grad);
      } else {
        grad_ = grad;
      }
      
      at::Tensor output_;
      if (at::functionalization::impl::isFunctionalTensor(output)) {
        at::functionalization::impl::sync(output);
        output_ = at::functionalization::impl::from_functional_tensor(output);
      } else {
        output_ = output;
      }
      
      at::Tensor data_;
      if (at::functionalization::impl::isFunctionalTensor(data)) {
        at::functionalization::impl::sync(data);
        data_ = at::functionalization::impl::from_functional_tensor(data);
      } else {
        data_ = data;
      }
      
      ::std::optional<at::Tensor> lengths_;
      if (at::functionalization::impl::isFunctionalTensor(lengths)) {
        at::functionalization::impl::sync(lengths);
        lengths_ = at::functionalization::impl::from_functional_tensor(lengths);
      } else {
        lengths_ = lengths;
      }
      
      ::std::optional<at::Tensor> offsets_;
      if (at::functionalization::impl::isFunctionalTensor(offsets)) {
        at::functionalization::impl::sync(offsets);
        offsets_ = at::functionalization::impl::from_functional_tensor(offsets);
      } else {
        offsets_ = offsets;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || grad.device().type() == c10::DeviceType::XLA || output.device().type() == c10::DeviceType::XLA || data.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(grad) || at::functionalization::impl::isFunctionalTensor(output) || at::functionalization::impl::isFunctionalTensor(data) || at::functionalization::impl::isFunctionalTensor(lengths) || at::functionalization::impl::isFunctionalTensor(offsets))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_segment_reduce_backward_out::call(grad_, output_, data_, reduce, lengths_, offsets_, axis, initial, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_segment_reduce_backward::call(grad_, output_, data_, reduce, lengths_, offsets_, axis, initial);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

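    // TensorList arguments are unwrapped into a ::std::vector<at::Tensor>. The XLA-device
    // escape hatch degenerates to `!(false)` here because no per-element device condition
    // is emitted for TensorList inputs.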
    at::Tensor & _nested_tensor_from_tensor_list_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList list, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto list_meta = to_meta(list);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_nested_tensor_from_tensor_list_out::call(list_meta, dtype, layout, device, pin_memory, out_meta);
      }
      
      ::std::vector<at::Tensor> list_;
      if (at::functionalization::impl::isFunctionalTensor(list)) {
        at::functionalization::impl::sync(list);
        list_ = at::functionalization::impl::from_functional_tensor(list);
      } else {
        list_ = list.vec();
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(list))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_nested_tensor_from_tensor_list_out::call(list_, dtype, layout, device, pin_memory, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_nested_tensor_from_tensor_list::call(list_, dtype, layout, device, pin_memory);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

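    // The kernels that follow are the out= variants of *_copy view ops (_fw_primal_copy,
    // view_as_real_copy, as_strided_copy, ...); when `out` is a functional tensor, each
    // delegates to the corresponding functional *_copy op and writes the result into `out`.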
    at::Tensor & _fw_primal_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t level, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_fw_primal_copy_out::call(self_meta, level, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_fw_primal_copy_out::call(self_, level, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_fw_primal_copy::call(self_, level);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _make_dual_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & primal, const at::Tensor & tangent, int64_t level, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto primal_meta = to_meta(primal);
        auto tangent_meta = to_meta(tangent);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_make_dual_copy_out::call(primal_meta, tangent_meta, level, out_meta);
      }
      
      at::Tensor primal_;
      if (at::functionalization::impl::isFunctionalTensor(primal)) {
        at::functionalization::impl::sync(primal);
        primal_ = at::functionalization::impl::from_functional_tensor(primal);
      } else {
        primal_ = primal;
      }
      
      at::Tensor tangent_;
      if (at::functionalization::impl::isFunctionalTensor(tangent)) {
        at::functionalization::impl::sync(tangent);
        tangent_ = at::functionalization::impl::from_functional_tensor(tangent);
      } else {
        tangent_ = tangent;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || primal.device().type() == c10::DeviceType::XLA || tangent.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(primal) || at::functionalization::impl::isFunctionalTensor(tangent))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_make_dual_copy_out::call(primal_, tangent_, level, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_make_dual_copy::call(primal_, tangent_, level);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & view_as_real_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::view_as_real_copy_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::view_as_real_copy_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::view_as_real_copy::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & view_as_complex_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::view_as_complex_copy_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::view_as_complex_copy_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::view_as_complex_copy::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _conj_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_conj_copy_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_conj_copy_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_conj_copy::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _neg_view_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_neg_view_copy_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_neg_view_copy_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_neg_view_copy::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & as_strided_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<c10::SymInt> storage_offset, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::as_strided_copy_out::call(self_meta, size, stride, storage_offset, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::as_strided_copy_out::call(self_, size, stride, storage_offset, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::as_strided_copy::call(self_, size, stride, storage_offset);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _sparse_broadcast_to_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_sparse_broadcast_to_copy_out::call(self_meta, size, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_sparse_broadcast_to_copy_out::call(self_, size, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_sparse_broadcast_to_copy::call(self_, size);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & diagonal_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::diagonal_copy_out::call(self_meta, offset, dim1, dim2, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::diagonal_copy_out::call(self_, offset, dim1, dim2, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::diagonal_copy::call(self_, offset, dim1, dim2);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & expand_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, bool implicit, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::expand_copy_out::call(self_meta, size, implicit, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::expand_copy_out::call(self_, size, implicit, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::expand_copy::call(self_, size, implicit);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & permute_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dims, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::permute_copy_out::call(self_meta, dims, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::permute_copy_out::call(self_, dims, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::permute_copy::call(self_, dims);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _reshape_alias_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_reshape_alias_copy_out::call(self_meta, size, stride, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_reshape_alias_copy_out::call(self_, size, stride, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_reshape_alias_copy::call(self_, size, stride);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & select_copy_out_int_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::SymInt index, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::select_copy_int_out::call(self_meta, dim, index, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::select_copy_int_out::call(self_, dim, index, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::select_copy_int::call(self_, dim, index);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & detach_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::detach_copy_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::detach_copy_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::detach_copy::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & slice_copy_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, ::std::optional<c10::SymInt> start, ::std::optional<c10::SymInt> end, c10::SymInt step, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::slice_copy_Tensor_out::call(self_meta, dim, start, end, step, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::slice_copy_Tensor_out::call(self_, dim, start, end, step, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::slice_copy_Tensor::call(self_, dim, start, end, step);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

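    // Multi-output variants (split_copy, split_with_sizes_copy, unbind_copy) take an
    // at::TensorList `out` instead of a single Tensor&, so they unwrap it into a
    // std::vector<at::Tensor>, return void, and otherwise follow the same pattern as above.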
    void split_copy_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::split_copy_Tensor_out::call(self_meta, split_size, dim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::split_copy_Tensor_out::call(self_, split_size, dim, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::split_copy_Tensor::call(self_, split_size, dim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void split_with_sizes_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::split_with_sizes_copy_out::call(self_meta, split_sizes, dim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::split_with_sizes_copy_out::call(self_, split_sizes, dim, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::split_with_sizes_copy::call(self_, split_sizes, dim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    at::Tensor & squeeze_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::squeeze_copy_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::squeeze_copy_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::squeeze_copy::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & squeeze_copy_out_dim_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::squeeze_copy_dim_out::call(self_meta, dim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::squeeze_copy_dim_out::call(self_, dim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::squeeze_copy_dim::call(self_, dim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & squeeze_copy_out_dims_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::squeeze_copy_dims_out::call(self_meta, dim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::squeeze_copy_dims_out::call(self_, dim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::squeeze_copy_dims::call(self_, dim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & t_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::t_copy_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::t_copy_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::t_copy::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & transpose_copy_out_int_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim0, int64_t dim1, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::transpose_copy_int_out::call(self_meta, dim0, dim1, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::transpose_copy_int_out::call(self_, dim0, dim1, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::transpose_copy_int::call(self_, dim0, dim1);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & unsqueeze_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::unsqueeze_copy_out::call(self_meta, dim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::unsqueeze_copy_out::call(self_, dim, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::unsqueeze_copy::call(self_, dim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _indices_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_indices_copy_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_indices_copy_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_indices_copy::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _values_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_values_copy_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_values_copy_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_values_copy::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & indices_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::indices_copy_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::indices_copy_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::indices_copy::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & values_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::values_copy_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::values_copy_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::values_copy::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & crow_indices_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::crow_indices_copy_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::crow_indices_copy_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::crow_indices_copy::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & col_indices_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::col_indices_copy_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::col_indices_copy_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::col_indices_copy::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & ccol_indices_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::ccol_indices_copy_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::ccol_indices_copy_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::ccol_indices_copy::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & row_indices_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::row_indices_copy_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::row_indices_copy_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::row_indices_copy::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    void unbind_copy_out_int_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::unbind_copy_int_out::call(self_meta, dim, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::unbind_copy_int_out::call(self_, dim, out_);
         
        }
      } else {
        ::std::vector<at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::unbind_copy_int::call(self_, dim);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }
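    // When an op's out arguments form a TensorList (as with unbind_copy.int_out
    // above), the same pattern is applied through the ::std::vector<at::Tensor>
    // overloads of from_functional_tensor()/replace_()/commit_update()/sync(),
    // and the kernel returns void instead of a reference to `out`.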

    at::Tensor & view_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::view_copy_out::call(self_meta, size, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::view_copy_out::call(self_, size, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::view_copy::call(self_, size);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & view_copy_out_dtype_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::view_copy_dtype_out::call(self_meta, dtype, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::view_copy_dtype_out::call(self_, dtype, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::view_copy_dtype::call(self_, dtype);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & unfold_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dimension, int64_t size, int64_t step, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::unfold_copy_out::call(self_meta, dimension, size, step, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::unfold_copy_out::call(self_, dimension, size, step, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::unfold_copy::call(self_, dimension, size, step);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & alias_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::alias_copy_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::alias_copy_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::alias_copy::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & to_padded_tensor_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double padding, at::OptionalSymIntArrayRef output_size, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::to_padded_tensor_out::call(self_meta, padding, output_size, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::to_padded_tensor_out::call(self_, padding, output_size, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::to_padded_tensor::call(self_, padding, output_size);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _transformer_encoder_layer_fwd_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const ::std::optional<at::Tensor> & mask, ::std::optional<int64_t> mask_type, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto src_meta = to_meta(src);
        auto qkv_weight_meta = to_meta(qkv_weight);
        auto qkv_bias_meta = to_meta(qkv_bias);
        auto proj_weight_meta = to_meta(proj_weight);
        auto proj_bias_meta = to_meta(proj_bias);
        auto norm_weight_1_meta = to_meta(norm_weight_1);
        auto norm_bias_1_meta = to_meta(norm_bias_1);
        auto norm_weight_2_meta = to_meta(norm_weight_2);
        auto norm_bias_2_meta = to_meta(norm_bias_2);
        auto ffn_weight_1_meta = to_meta(ffn_weight_1);
        auto ffn_bias_1_meta = to_meta(ffn_bias_1);
        auto ffn_weight_2_meta = to_meta(ffn_weight_2);
        auto ffn_bias_2_meta = to_meta(ffn_bias_2);
        auto mask_meta = to_meta(mask);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_transformer_encoder_layer_fwd_out::call(src_meta, embed_dim, num_heads, qkv_weight_meta, qkv_bias_meta, proj_weight_meta, proj_bias_meta, use_gelu, norm_first, eps, norm_weight_1_meta, norm_bias_1_meta, norm_weight_2_meta, norm_bias_2_meta, ffn_weight_1_meta, ffn_bias_1_meta, ffn_weight_2_meta, ffn_bias_2_meta, mask_meta, mask_type, out_meta);
      }
      
      at::Tensor src_;
      if (at::functionalization::impl::isFunctionalTensor(src)) {
        at::functionalization::impl::sync(src);
        src_ = at::functionalization::impl::from_functional_tensor(src);
      } else {
        src_ = src;
      }
      
      at::Tensor qkv_weight_;
      if (at::functionalization::impl::isFunctionalTensor(qkv_weight)) {
        at::functionalization::impl::sync(qkv_weight);
        qkv_weight_ = at::functionalization::impl::from_functional_tensor(qkv_weight);
      } else {
        qkv_weight_ = qkv_weight;
      }
      
      at::Tensor qkv_bias_;
      if (at::functionalization::impl::isFunctionalTensor(qkv_bias)) {
        at::functionalization::impl::sync(qkv_bias);
        qkv_bias_ = at::functionalization::impl::from_functional_tensor(qkv_bias);
      } else {
        qkv_bias_ = qkv_bias;
      }
      
      at::Tensor proj_weight_;
      if (at::functionalization::impl::isFunctionalTensor(proj_weight)) {
        at::functionalization::impl::sync(proj_weight);
        proj_weight_ = at::functionalization::impl::from_functional_tensor(proj_weight);
      } else {
        proj_weight_ = proj_weight;
      }
      
      at::Tensor proj_bias_;
      if (at::functionalization::impl::isFunctionalTensor(proj_bias)) {
        at::functionalization::impl::sync(proj_bias);
        proj_bias_ = at::functionalization::impl::from_functional_tensor(proj_bias);
      } else {
        proj_bias_ = proj_bias;
      }
      
      at::Tensor norm_weight_1_;
      if (at::functionalization::impl::isFunctionalTensor(norm_weight_1)) {
        at::functionalization::impl::sync(norm_weight_1);
        norm_weight_1_ = at::functionalization::impl::from_functional_tensor(norm_weight_1);
      } else {
        norm_weight_1_ = norm_weight_1;
      }
      
      at::Tensor norm_bias_1_;
      if (at::functionalization::impl::isFunctionalTensor(norm_bias_1)) {
        at::functionalization::impl::sync(norm_bias_1);
        norm_bias_1_ = at::functionalization::impl::from_functional_tensor(norm_bias_1);
      } else {
        norm_bias_1_ = norm_bias_1;
      }
      
      at::Tensor norm_weight_2_;
      if (at::functionalization::impl::isFunctionalTensor(norm_weight_2)) {
        at::functionalization::impl::sync(norm_weight_2);
        norm_weight_2_ = at::functionalization::impl::from_functional_tensor(norm_weight_2);
      } else {
        norm_weight_2_ = norm_weight_2;
      }
      
      at::Tensor norm_bias_2_;
      if (at::functionalization::impl::isFunctionalTensor(norm_bias_2)) {
        at::functionalization::impl::sync(norm_bias_2);
        norm_bias_2_ = at::functionalization::impl::from_functional_tensor(norm_bias_2);
      } else {
        norm_bias_2_ = norm_bias_2;
      }
      
      at::Tensor ffn_weight_1_;
      if (at::functionalization::impl::isFunctionalTensor(ffn_weight_1)) {
        at::functionalization::impl::sync(ffn_weight_1);
        ffn_weight_1_ = at::functionalization::impl::from_functional_tensor(ffn_weight_1);
      } else {
        ffn_weight_1_ = ffn_weight_1;
      }
      
      at::Tensor ffn_bias_1_;
      if (at::functionalization::impl::isFunctionalTensor(ffn_bias_1)) {
        at::functionalization::impl::sync(ffn_bias_1);
        ffn_bias_1_ = at::functionalization::impl::from_functional_tensor(ffn_bias_1);
      } else {
        ffn_bias_1_ = ffn_bias_1;
      }
      
      at::Tensor ffn_weight_2_;
      if (at::functionalization::impl::isFunctionalTensor(ffn_weight_2)) {
        at::functionalization::impl::sync(ffn_weight_2);
        ffn_weight_2_ = at::functionalization::impl::from_functional_tensor(ffn_weight_2);
      } else {
        ffn_weight_2_ = ffn_weight_2;
      }
      
      at::Tensor ffn_bias_2_;
      if (at::functionalization::impl::isFunctionalTensor(ffn_bias_2)) {
        at::functionalization::impl::sync(ffn_bias_2);
        ffn_bias_2_ = at::functionalization::impl::from_functional_tensor(ffn_bias_2);
      } else {
        ffn_bias_2_ = ffn_bias_2;
      }
      
      ::std::optional<at::Tensor> mask_;
      if (at::functionalization::impl::isFunctionalTensor(mask)) {
        at::functionalization::impl::sync(mask);
        mask_ = at::functionalization::impl::from_functional_tensor(mask);
      } else {
        mask_ = mask;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || src.device().type() == c10::DeviceType::XLA || qkv_weight.device().type() == c10::DeviceType::XLA || qkv_bias.device().type() == c10::DeviceType::XLA || proj_weight.device().type() == c10::DeviceType::XLA || proj_bias.device().type() == c10::DeviceType::XLA || norm_weight_1.device().type() == c10::DeviceType::XLA || norm_bias_1.device().type() == c10::DeviceType::XLA || norm_weight_2.device().type() == c10::DeviceType::XLA || norm_bias_2.device().type() == c10::DeviceType::XLA || ffn_weight_1.device().type() == c10::DeviceType::XLA || ffn_bias_1.device().type() == c10::DeviceType::XLA || ffn_weight_2.device().type() == c10::DeviceType::XLA || ffn_bias_2.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(src) || at::functionalization::impl::isFunctionalTensor(qkv_weight) || at::functionalization::impl::isFunctionalTensor(qkv_bias) || at::functionalization::impl::isFunctionalTensor(proj_weight) || at::functionalization::impl::isFunctionalTensor(proj_bias) || at::functionalization::impl::isFunctionalTensor(norm_weight_1) || at::functionalization::impl::isFunctionalTensor(norm_bias_1) || at::functionalization::impl::isFunctionalTensor(norm_weight_2) || at::functionalization::impl::isFunctionalTensor(norm_bias_2) || at::functionalization::impl::isFunctionalTensor(ffn_weight_1) || at::functionalization::impl::isFunctionalTensor(ffn_bias_1) || at::functionalization::impl::isFunctionalTensor(ffn_weight_2) || at::functionalization::impl::isFunctionalTensor(ffn_bias_2) || at::functionalization::impl::isFunctionalTensor(mask))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_transformer_encoder_layer_fwd_out::call(src_, embed_dim, num_heads, qkv_weight_, qkv_bias_, proj_weight_, proj_bias_, use_gelu, norm_first, eps, norm_weight_1_, norm_bias_1_, norm_weight_2_, norm_bias_2_, ffn_weight_1_, ffn_bias_1_, ffn_weight_2_, ffn_bias_2_, mask_, mask_type, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_transformer_encoder_layer_fwd::call(src_, embed_dim, num_heads, qkv_weight_, qkv_bias_, proj_weight_, proj_bias_, use_gelu, norm_first, eps, norm_weight_1_, norm_bias_1_, norm_weight_2_, norm_bias_2_, ffn_weight_1_, ffn_bias_1_, ffn_weight_2_, ffn_bias_2_, mask_, mask_type);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
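    // Optional tensor arguments such as `mask` above are carried as
    // ::std::optional<at::Tensor>; they are unwrapped with the same
    // sync()/from_functional_tensor() helpers and are included in the
    // isFunctionalTensor() input check before the redispatch decision.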

    ::std::tuple<at::Tensor &,at::Tensor &> _native_multi_head_attention_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const ::std::optional<at::Tensor> & mask, bool need_weights, bool average_attn_weights, ::std::optional<int64_t> mask_type, at::Tensor & out0, at::Tensor & out1) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto query_meta = to_meta(query);
        auto key_meta = to_meta(key);
        auto value_meta = to_meta(value);
        auto qkv_weight_meta = to_meta(qkv_weight);
        auto qkv_bias_meta = to_meta(qkv_bias);
        auto proj_weight_meta = to_meta(proj_weight);
        auto proj_bias_meta = to_meta(proj_bias);
        auto mask_meta = to_meta(mask);
        auto out0_meta = to_meta(out0);
        auto out1_meta = to_meta(out1);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_native_multi_head_attention_out::call(query_meta, key_meta, value_meta, embed_dim, num_head, qkv_weight_meta, qkv_bias_meta, proj_weight_meta, proj_bias_meta, mask_meta, need_weights, average_attn_weights, mask_type, out0_meta, out1_meta);
      }
      
      at::Tensor query_;
      if (at::functionalization::impl::isFunctionalTensor(query)) {
        at::functionalization::impl::sync(query);
        query_ = at::functionalization::impl::from_functional_tensor(query);
      } else {
        query_ = query;
      }
      
      at::Tensor key_;
      if (at::functionalization::impl::isFunctionalTensor(key)) {
        at::functionalization::impl::sync(key);
        key_ = at::functionalization::impl::from_functional_tensor(key);
      } else {
        key_ = key;
      }
      
      at::Tensor value_;
      if (at::functionalization::impl::isFunctionalTensor(value)) {
        at::functionalization::impl::sync(value);
        value_ = at::functionalization::impl::from_functional_tensor(value);
      } else {
        value_ = value;
      }
      
      at::Tensor qkv_weight_;
      if (at::functionalization::impl::isFunctionalTensor(qkv_weight)) {
        at::functionalization::impl::sync(qkv_weight);
        qkv_weight_ = at::functionalization::impl::from_functional_tensor(qkv_weight);
      } else {
        qkv_weight_ = qkv_weight;
      }
      
      at::Tensor qkv_bias_;
      if (at::functionalization::impl::isFunctionalTensor(qkv_bias)) {
        at::functionalization::impl::sync(qkv_bias);
        qkv_bias_ = at::functionalization::impl::from_functional_tensor(qkv_bias);
      } else {
        qkv_bias_ = qkv_bias;
      }
      
      at::Tensor proj_weight_;
      if (at::functionalization::impl::isFunctionalTensor(proj_weight)) {
        at::functionalization::impl::sync(proj_weight);
        proj_weight_ = at::functionalization::impl::from_functional_tensor(proj_weight);
      } else {
        proj_weight_ = proj_weight;
      }
      
      at::Tensor proj_bias_;
      if (at::functionalization::impl::isFunctionalTensor(proj_bias)) {
        at::functionalization::impl::sync(proj_bias);
        proj_bias_ = at::functionalization::impl::from_functional_tensor(proj_bias);
      } else {
        proj_bias_ = proj_bias;
      }
      
      ::std::optional<at::Tensor> mask_;
      if (at::functionalization::impl::isFunctionalTensor(mask)) {
        at::functionalization::impl::sync(mask);
        mask_ = at::functionalization::impl::from_functional_tensor(mask);
      } else {
        mask_ = mask;
      }
      
      at::Tensor out0_;
      if (at::functionalization::impl::isFunctionalTensor(out0)) {
        at::functionalization::impl::sync(out0);
        out0_ = at::functionalization::impl::from_functional_tensor(out0);
      } else {
        out0_ = out0;
      }
      
      at::Tensor out1_;
      if (at::functionalization::impl::isFunctionalTensor(out1)) {
        at::functionalization::impl::sync(out1);
        out1_ = at::functionalization::impl::from_functional_tensor(out1);
      } else {
        out1_ = out1;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || query.device().type() == c10::DeviceType::XLA || key.device().type() == c10::DeviceType::XLA || value.device().type() == c10::DeviceType::XLA || qkv_weight.device().type() == c10::DeviceType::XLA || qkv_bias.device().type() == c10::DeviceType::XLA || proj_weight.device().type() == c10::DeviceType::XLA || proj_bias.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(query) || at::functionalization::impl::isFunctionalTensor(key) || at::functionalization::impl::isFunctionalTensor(value) || at::functionalization::impl::isFunctionalTensor(qkv_weight) || at::functionalization::impl::isFunctionalTensor(qkv_bias) || at::functionalization::impl::isFunctionalTensor(proj_weight) || at::functionalization::impl::isFunctionalTensor(proj_bias) || at::functionalization::impl::isFunctionalTensor(mask))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::_native_multi_head_attention_out::call(query_, key_, value_, embed_dim, num_head, qkv_weight_, qkv_bias_, proj_weight_, proj_bias_, mask_, need_weights, average_attn_weights, mask_type, out0_, out1_);
         return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
        }
      } else {
        ::std::tuple<at::Tensor,at::Tensor> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_native_multi_head_attention::call(query_, key_, value_, embed_dim, num_head, qkv_weight_, qkv_bias_, proj_weight_, proj_bias_, mask_, need_weights, average_attn_weights, mask_type);
        }
        auto out0_inner = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::replace_(out0, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(out0);
        at::functionalization::impl::sync(out0);
        auto out0_inner_updated = at::functionalization::impl::from_functional_tensor(out0);
        at::functionalization::impl::propagate_xla_data_direct(out0_inner, out0_inner_updated);
        auto out1_inner = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::replace_(out1, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(out1);
        at::functionalization::impl::sync(out1);
        auto out1_inner_updated = at::functionalization::impl::from_functional_tensor(out1);
        at::functionalization::impl::propagate_xla_data_direct(out1_inner, out1_inner_updated);
        return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
      }
    }
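    // Kernels with multiple out arguments (such as _native_multi_head_attention.out
    // above) call the functional variant once and then write each element of the
    // returned tuple back into its corresponding out tensor with its own
    // replace_()/commit_update()/sync()/propagate_xla_data_direct() sequence,
    // finally returning a tuple of references to the original out tensors.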

    at::Tensor & _triton_scaled_dot_attention_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & q, const at::Tensor & k, const at::Tensor & v, double dropout_p, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto q_meta = to_meta(q);
        auto k_meta = to_meta(k);
        auto v_meta = to_meta(v);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_triton_scaled_dot_attention_out::call(q_meta, k_meta, v_meta, dropout_p, out_meta);
      }
      
      at::Tensor q_;
      if (at::functionalization::impl::isFunctionalTensor(q)) {
        at::functionalization::impl::sync(q);
        q_ = at::functionalization::impl::from_functional_tensor(q);
      } else {
        q_ = q;
      }
      
      at::Tensor k_;
      if (at::functionalization::impl::isFunctionalTensor(k)) {
        at::functionalization::impl::sync(k);
        k_ = at::functionalization::impl::from_functional_tensor(k);
      } else {
        k_ = k;
      }
      
      at::Tensor v_;
      if (at::functionalization::impl::isFunctionalTensor(v)) {
        at::functionalization::impl::sync(v);
        v_ = at::functionalization::impl::from_functional_tensor(v);
      } else {
        v_ = v;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || q.device().type() == c10::DeviceType::XLA || k.device().type() == c10::DeviceType::XLA || v.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(q) || at::functionalization::impl::isFunctionalTensor(k) || at::functionalization::impl::isFunctionalTensor(v))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_triton_scaled_dot_attention_out::call(q_, k_, v_, dropout_p, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_triton_scaled_dot_attention::call(q_, k_, v_, dropout_p);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & _triton_multi_head_attention_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const ::std::optional<at::Tensor> & mask, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto query_meta = to_meta(query);
        auto key_meta = to_meta(key);
        auto value_meta = to_meta(value);
        auto qkv_weight_meta = to_meta(qkv_weight);
        auto qkv_bias_meta = to_meta(qkv_bias);
        auto proj_weight_meta = to_meta(proj_weight);
        auto proj_bias_meta = to_meta(proj_bias);
        auto mask_meta = to_meta(mask);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_triton_multi_head_attention_out::call(query_meta, key_meta, value_meta, embed_dim, num_head, qkv_weight_meta, qkv_bias_meta, proj_weight_meta, proj_bias_meta, mask_meta, out_meta);
      }
      
      at::Tensor query_;
      if (at::functionalization::impl::isFunctionalTensor(query)) {
        at::functionalization::impl::sync(query);
        query_ = at::functionalization::impl::from_functional_tensor(query);
      } else {
        query_ = query;
      }
      
      at::Tensor key_;
      if (at::functionalization::impl::isFunctionalTensor(key)) {
        at::functionalization::impl::sync(key);
        key_ = at::functionalization::impl::from_functional_tensor(key);
      } else {
        key_ = key;
      }
      
      at::Tensor value_;
      if (at::functionalization::impl::isFunctionalTensor(value)) {
        at::functionalization::impl::sync(value);
        value_ = at::functionalization::impl::from_functional_tensor(value);
      } else {
        value_ = value;
      }
      
      at::Tensor qkv_weight_;
      if (at::functionalization::impl::isFunctionalTensor(qkv_weight)) {
        at::functionalization::impl::sync(qkv_weight);
        qkv_weight_ = at::functionalization::impl::from_functional_tensor(qkv_weight);
      } else {
        qkv_weight_ = qkv_weight;
      }
      
      at::Tensor qkv_bias_;
      if (at::functionalization::impl::isFunctionalTensor(qkv_bias)) {
        at::functionalization::impl::sync(qkv_bias);
        qkv_bias_ = at::functionalization::impl::from_functional_tensor(qkv_bias);
      } else {
        qkv_bias_ = qkv_bias;
      }
      
      at::Tensor proj_weight_;
      if (at::functionalization::impl::isFunctionalTensor(proj_weight)) {
        at::functionalization::impl::sync(proj_weight);
        proj_weight_ = at::functionalization::impl::from_functional_tensor(proj_weight);
      } else {
        proj_weight_ = proj_weight;
      }
      
      at::Tensor proj_bias_;
      if (at::functionalization::impl::isFunctionalTensor(proj_bias)) {
        at::functionalization::impl::sync(proj_bias);
        proj_bias_ = at::functionalization::impl::from_functional_tensor(proj_bias);
      } else {
        proj_bias_ = proj_bias;
      }
      
      ::std::optional<at::Tensor> mask_;
      if (at::functionalization::impl::isFunctionalTensor(mask)) {
        at::functionalization::impl::sync(mask);
        mask_ = at::functionalization::impl::from_functional_tensor(mask);
      } else {
        mask_ = mask;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || query.device().type() == c10::DeviceType::XLA || key.device().type() == c10::DeviceType::XLA || value.device().type() == c10::DeviceType::XLA || qkv_weight.device().type() == c10::DeviceType::XLA || qkv_bias.device().type() == c10::DeviceType::XLA || proj_weight.device().type() == c10::DeviceType::XLA || proj_bias.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(query) || at::functionalization::impl::isFunctionalTensor(key) || at::functionalization::impl::isFunctionalTensor(value) || at::functionalization::impl::isFunctionalTensor(qkv_weight) || at::functionalization::impl::isFunctionalTensor(qkv_bias) || at::functionalization::impl::isFunctionalTensor(proj_weight) || at::functionalization::impl::isFunctionalTensor(proj_bias) || at::functionalization::impl::isFunctionalTensor(mask))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_triton_multi_head_attention_out::call(query_, key_, value_, embed_dim, num_head, qkv_weight_, qkv_bias_, proj_weight_, proj_bias_, mask_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_triton_multi_head_attention::call(query_, key_, value_, embed_dim, num_head, qkv_weight_, qkv_bias_, proj_weight_, proj_bias_, mask_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_airy_ai_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto x_meta = to_meta(x);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_airy_ai_out::call(x_meta, out_meta);
      }
      
      at::Tensor x_;
      if (at::functionalization::impl::isFunctionalTensor(x)) {
        at::functionalization::impl::sync(x);
        x_ = at::functionalization::impl::from_functional_tensor(x);
      } else {
        x_ = x;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || x.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(x))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_airy_ai_out::call(x_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_airy_ai::call(x_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_bessel_j0_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_bessel_j0_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_bessel_j0_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_bessel_j0::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_bessel_j1_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_bessel_j1_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_bessel_j1_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_bessel_j1::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_bessel_y0_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_bessel_y0_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_bessel_y0_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_bessel_y0::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_bessel_y1_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_bessel_y1_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_bessel_y1_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_bessel_y1::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_chebyshev_polynomial_t_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto x_meta = to_meta(x);
        auto n_meta = to_meta(n);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_chebyshev_polynomial_t_out::call(x_meta, n_meta, out_meta);
      }
      
      at::Tensor x_;
      if (at::functionalization::impl::isFunctionalTensor(x)) {
        at::functionalization::impl::sync(x);
        x_ = at::functionalization::impl::from_functional_tensor(x);
      } else {
        x_ = x;
      }
      
      at::Tensor n_;
      if (at::functionalization::impl::isFunctionalTensor(n)) {
        at::functionalization::impl::sync(n);
        n_ = at::functionalization::impl::from_functional_tensor(n);
      } else {
        n_ = n;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || x.device().type() == c10::DeviceType::XLA || n.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(x) || at::functionalization::impl::isFunctionalTensor(n))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_chebyshev_polynomial_t_out::call(x_, n_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_chebyshev_polynomial_t::call(x_, n_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
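
    // Illustrative sketch (not part of the generated registrations): the "case 2" path of
    // the kernels above. When neither input nor out is a functional tensor, the kernel
    // skips the functionalization rewrite and simply redispatches the original out= op
    // below the Functionalize key. The helper name is hypothetical.
    [[maybe_unused]] static at::Tensor & example_chebyshev_t_plain_redispatch(
        const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
      // With no FunctionalTensorWrappers involved, this mirrors what case 2 does.
      at::AutoDispatchSkipFunctionalize guard;
      return at::_ops::special_chebyshev_polynomial_t_out::call(x, n, out);
    }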

    at::Tensor & special_chebyshev_polynomial_t_out_x_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto n_meta = to_meta(n);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_chebyshev_polynomial_t_x_scalar_out::call(x, n_meta, out_meta);
      }
      
      at::Tensor n_;
      if (at::functionalization::impl::isFunctionalTensor(n)) {
        at::functionalization::impl::sync(n);
        n_ = at::functionalization::impl::from_functional_tensor(n);
      } else {
        n_ = n;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || n.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(n))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_chebyshev_polynomial_t_x_scalar_out::call(x, n_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_chebyshev_polynomial_t_x_scalar::call(x, n_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_chebyshev_polynomial_t_out_n_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto x_meta = to_meta(x);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_chebyshev_polynomial_t_n_scalar_out::call(x_meta, n, out_meta);
      }
      
      at::Tensor x_;
      if (at::functionalization::impl::isFunctionalTensor(x)) {
        at::functionalization::impl::sync(x);
        x_ = at::functionalization::impl::from_functional_tensor(x);
      } else {
        x_ = x;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || x.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(x))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_chebyshev_polynomial_t_n_scalar_out::call(x_, n, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_chebyshev_polynomial_t_n_scalar::call(x_, n);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
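
    // Illustrative sketch (not part of the generated registrations): the commit sequence
    // that ends every functionalized out= kernel above, spelled out on its own. Given a
    // functional `out` and a freshly computed plain result, replace_ swaps the wrapper's
    // value for the result, commit_update records the mutation against out's alias
    // storage, and sync applies any pending updates back onto out. Hypothetical helper.
    [[maybe_unused]] static void example_commit_result_into(at::Tensor & out,
                                                            const at::Tensor & tmp_output) {
      TORCH_INTERNAL_ASSERT(at::functionalization::impl::isFunctionalTensor(out));
      at::functionalization::impl::replace_(out, tmp_output);
      at::functionalization::impl::commit_update(out);
      at::functionalization::impl::sync(out);
    }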

    at::Tensor & special_chebyshev_polynomial_u_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto x_meta = to_meta(x);
        auto n_meta = to_meta(n);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_chebyshev_polynomial_u_out::call(x_meta, n_meta, out_meta);
      }
      
      at::Tensor x_;
      if (at::functionalization::impl::isFunctionalTensor(x)) {
        at::functionalization::impl::sync(x);
        x_ = at::functionalization::impl::from_functional_tensor(x);
      } else {
        x_ = x;
      }
      
      at::Tensor n_;
      if (at::functionalization::impl::isFunctionalTensor(n)) {
        at::functionalization::impl::sync(n);
        n_ = at::functionalization::impl::from_functional_tensor(n);
      } else {
        n_ = n;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || x.device().type() == c10::DeviceType::XLA || n.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(x) || at::functionalization::impl::isFunctionalTensor(n))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_chebyshev_polynomial_u_out::call(x_, n_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_chebyshev_polynomial_u::call(x_, n_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_chebyshev_polynomial_u_out_x_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto n_meta = to_meta(n);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_chebyshev_polynomial_u_x_scalar_out::call(x, n_meta, out_meta);
      }
      
      at::Tensor n_;
      if (at::functionalization::impl::isFunctionalTensor(n)) {
        at::functionalization::impl::sync(n);
        n_ = at::functionalization::impl::from_functional_tensor(n);
      } else {
        n_ = n;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || n.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(n))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_chebyshev_polynomial_u_x_scalar_out::call(x, n_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_chebyshev_polynomial_u_x_scalar::call(x, n_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_chebyshev_polynomial_u_out_n_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto x_meta = to_meta(x);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_chebyshev_polynomial_u_n_scalar_out::call(x_meta, n, out_meta);
      }
      
      at::Tensor x_;
      if (at::functionalization::impl::isFunctionalTensor(x)) {
        at::functionalization::impl::sync(x);
        x_ = at::functionalization::impl::from_functional_tensor(x);
      } else {
        x_ = x;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || x.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(x))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_chebyshev_polynomial_u_n_scalar_out::call(x_, n, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_chebyshev_polynomial_u_n_scalar::call(x_, n);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_chebyshev_polynomial_v_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto x_meta = to_meta(x);
        auto n_meta = to_meta(n);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_chebyshev_polynomial_v_out::call(x_meta, n_meta, out_meta);
      }
      
      at::Tensor x_;
      if (at::functionalization::impl::isFunctionalTensor(x)) {
        at::functionalization::impl::sync(x);
        x_ = at::functionalization::impl::from_functional_tensor(x);
      } else {
        x_ = x;
      }
      
      at::Tensor n_;
      if (at::functionalization::impl::isFunctionalTensor(n)) {
        at::functionalization::impl::sync(n);
        n_ = at::functionalization::impl::from_functional_tensor(n);
      } else {
        n_ = n;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || x.device().type() == c10::DeviceType::XLA || n.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(x) || at::functionalization::impl::isFunctionalTensor(n))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_chebyshev_polynomial_v_out::call(x_, n_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_chebyshev_polynomial_v::call(x_, n_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_chebyshev_polynomial_v_out_x_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto n_meta = to_meta(n);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_chebyshev_polynomial_v_x_scalar_out::call(x, n_meta, out_meta);
      }
      
      at::Tensor n_;
      if (at::functionalization::impl::isFunctionalTensor(n)) {
        at::functionalization::impl::sync(n);
        n_ = at::functionalization::impl::from_functional_tensor(n);
      } else {
        n_ = n;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || n.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(n))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_chebyshev_polynomial_v_x_scalar_out::call(x, n_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_chebyshev_polynomial_v_x_scalar::call(x, n_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_chebyshev_polynomial_v_out_n_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto x_meta = to_meta(x);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_chebyshev_polynomial_v_n_scalar_out::call(x_meta, n, out_meta);
      }
      
      at::Tensor x_;
      if (at::functionalization::impl::isFunctionalTensor(x)) {
        at::functionalization::impl::sync(x);
        x_ = at::functionalization::impl::from_functional_tensor(x);
      } else {
        x_ = x;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || x.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(x))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_chebyshev_polynomial_v_n_scalar_out::call(x_, n, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_chebyshev_polynomial_v_n_scalar::call(x_, n);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_chebyshev_polynomial_w_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto x_meta = to_meta(x);
        auto n_meta = to_meta(n);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_chebyshev_polynomial_w_out::call(x_meta, n_meta, out_meta);
      }
      
      at::Tensor x_;
      if (at::functionalization::impl::isFunctionalTensor(x)) {
        at::functionalization::impl::sync(x);
        x_ = at::functionalization::impl::from_functional_tensor(x);
      } else {
        x_ = x;
      }
      
      at::Tensor n_;
      if (at::functionalization::impl::isFunctionalTensor(n)) {
        at::functionalization::impl::sync(n);
        n_ = at::functionalization::impl::from_functional_tensor(n);
      } else {
        n_ = n;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || x.device().type() == c10::DeviceType::XLA || n.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(x) || at::functionalization::impl::isFunctionalTensor(n))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_chebyshev_polynomial_w_out::call(x_, n_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_chebyshev_polynomial_w::call(x_, n_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_chebyshev_polynomial_w_out_x_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto n_meta = to_meta(n);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_chebyshev_polynomial_w_x_scalar_out::call(x, n_meta, out_meta);
      }
      
      at::Tensor n_;
      if (at::functionalization::impl::isFunctionalTensor(n)) {
        at::functionalization::impl::sync(n);
        n_ = at::functionalization::impl::from_functional_tensor(n);
      } else {
        n_ = n;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || n.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(n))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_chebyshev_polynomial_w_x_scalar_out::call(x, n_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_chebyshev_polynomial_w_x_scalar::call(x, n_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_chebyshev_polynomial_w_out_n_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto x_meta = to_meta(x);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_chebyshev_polynomial_w_n_scalar_out::call(x_meta, n, out_meta);
      }
      
      at::Tensor x_;
      if (at::functionalization::impl::isFunctionalTensor(x)) {
        at::functionalization::impl::sync(x);
        x_ = at::functionalization::impl::from_functional_tensor(x);
      } else {
        x_ = x;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || x.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(x))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_chebyshev_polynomial_w_n_scalar_out::call(x_, n, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_chebyshev_polynomial_w_n_scalar::call(x_, n);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_hermite_polynomial_h_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto x_meta = to_meta(x);
        auto n_meta = to_meta(n);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_hermite_polynomial_h_out::call(x_meta, n_meta, out_meta);
      }
      
      at::Tensor x_;
      if (at::functionalization::impl::isFunctionalTensor(x)) {
        at::functionalization::impl::sync(x);
        x_ = at::functionalization::impl::from_functional_tensor(x);
      } else {
        x_ = x;
      }
      
      at::Tensor n_;
      if (at::functionalization::impl::isFunctionalTensor(n)) {
        at::functionalization::impl::sync(n);
        n_ = at::functionalization::impl::from_functional_tensor(n);
      } else {
        n_ = n;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || x.device().type() == c10::DeviceType::XLA || n.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(x) || at::functionalization::impl::isFunctionalTensor(n))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_hermite_polynomial_h_out::call(x_, n_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_hermite_polynomial_h::call(x_, n_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
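
    // Illustrative sketch (not part of the generated registrations): what the disabled
    // meta-reference block above would do if enabled. Running the original mutable op on
    // meta tensors surfaces shape errors before the functional rewrite, without touching
    // real data. Hypothetical helper; to_meta and exclude_keys_for_meta_dispatch are the
    // helpers defined earlier in this file.
    [[maybe_unused]] static void example_hermite_h_meta_shape_check(
        const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
      auto x_meta = to_meta(x);
      auto n_meta = to_meta(n);
      auto out_meta = to_meta(out);
      at::AutoDispatchSkipFunctionalize func_guard;
      c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
      // Any shape/dtype error raised here is the same one the real out= op would raise.
      at::_ops::special_hermite_polynomial_h_out::call(x_meta, n_meta, out_meta);
    }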

    at::Tensor & special_hermite_polynomial_h_out_x_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto n_meta = to_meta(n);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_hermite_polynomial_h_x_scalar_out::call(x, n_meta, out_meta);
      }
      
      at::Tensor n_;
      if (at::functionalization::impl::isFunctionalTensor(n)) {
        at::functionalization::impl::sync(n);
        n_ = at::functionalization::impl::from_functional_tensor(n);
      } else {
        n_ = n;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || n.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(n))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_hermite_polynomial_h_x_scalar_out::call(x, n_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_hermite_polynomial_h_x_scalar::call(x, n_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_hermite_polynomial_h_out_n_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto x_meta = to_meta(x);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_hermite_polynomial_h_n_scalar_out::call(x_meta, n, out_meta);
      }
      
      at::Tensor x_;
      if (at::functionalization::impl::isFunctionalTensor(x)) {
        at::functionalization::impl::sync(x);
        x_ = at::functionalization::impl::from_functional_tensor(x);
      } else {
        x_ = x;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || x.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(x))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_hermite_polynomial_h_n_scalar_out::call(x_, n, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_hermite_polynomial_h_n_scalar::call(x_, n);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_hermite_polynomial_he_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto x_meta = to_meta(x);
        auto n_meta = to_meta(n);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_hermite_polynomial_he_out::call(x_meta, n_meta, out_meta);
      }
      
      at::Tensor x_;
      if (at::functionalization::impl::isFunctionalTensor(x)) {
        at::functionalization::impl::sync(x);
        x_ = at::functionalization::impl::from_functional_tensor(x);
      } else {
        x_ = x;
      }
      
      at::Tensor n_;
      if (at::functionalization::impl::isFunctionalTensor(n)) {
        at::functionalization::impl::sync(n);
        n_ = at::functionalization::impl::from_functional_tensor(n);
      } else {
        n_ = n;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || x.device().type() == c10::DeviceType::XLA || n.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(x) || at::functionalization::impl::isFunctionalTensor(n))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_hermite_polynomial_he_out::call(x_, n_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_hermite_polynomial_he::call(x_, n_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_hermite_polynomial_he_out_x_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto n_meta = to_meta(n);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_hermite_polynomial_he_x_scalar_out::call(x, n_meta, out_meta);
      }
      
      at::Tensor n_;
      if (at::functionalization::impl::isFunctionalTensor(n)) {
        at::functionalization::impl::sync(n);
        n_ = at::functionalization::impl::from_functional_tensor(n);
      } else {
        n_ = n;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || n.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(n))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_hermite_polynomial_he_x_scalar_out::call(x, n_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_hermite_polynomial_he_x_scalar::call(x, n_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_hermite_polynomial_he_out_n_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto x_meta = to_meta(x);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_hermite_polynomial_he_n_scalar_out::call(x_meta, n, out_meta);
      }
      
      at::Tensor x_;
      if (at::functionalization::impl::isFunctionalTensor(x)) {
        at::functionalization::impl::sync(x);
        x_ = at::functionalization::impl::from_functional_tensor(x);
      } else {
        x_ = x;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || x.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(x))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_hermite_polynomial_he_n_scalar_out::call(x_, n, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_hermite_polynomial_he_n_scalar::call(x_, n);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_laguerre_polynomial_l_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto x_meta = to_meta(x);
        auto n_meta = to_meta(n);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_laguerre_polynomial_l_out::call(x_meta, n_meta, out_meta);
      }
      
      at::Tensor x_;
      if (at::functionalization::impl::isFunctionalTensor(x)) {
        at::functionalization::impl::sync(x);
        x_ = at::functionalization::impl::from_functional_tensor(x);
      } else {
        x_ = x;
      }
      
      at::Tensor n_;
      if (at::functionalization::impl::isFunctionalTensor(n)) {
        at::functionalization::impl::sync(n);
        n_ = at::functionalization::impl::from_functional_tensor(n);
      } else {
        n_ = n;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || x.device().type() == c10::DeviceType::XLA || n.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(x) || at::functionalization::impl::isFunctionalTensor(n))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_laguerre_polynomial_l_out::call(x_, n_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_laguerre_polynomial_l::call(x_, n_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
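
    // Hedged sketch of the fallback the wrappers above take when `out` is NOT a
    // functional tensor: mutating a plain tensor with functional inputs is
    // rejected (unless an XLA tensor is involved, mirroring the comment about
    // cpu_tensor.copy_(xla_tensor)), otherwise the original out= kernel is simply
    // redispatched with functionalization skipped. Illustrative helper only.
    [[maybe_unused]] static at::Tensor & example_non_functional_out_fallback(
        const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
      bool any_functional_input =
          at::functionalization::impl::isFunctionalTensor(x) ||
          at::functionalization::impl::isFunctionalTensor(n);
      bool any_xla_input =
          x.device().type() == c10::DeviceType::XLA ||
          n.device().type() == c10::DeviceType::XLA;
      TORCH_INTERNAL_ASSERT(!any_functional_input || any_xla_input,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
      // No functional tensors involved: no-op and redispatch the original op.
      at::AutoDispatchSkipFunctionalize guard;
      at::_ops::special_laguerre_polynomial_l_out::call(x, n, out);
      return out;
    }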

    at::Tensor & special_laguerre_polynomial_l_out_x_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto n_meta = to_meta(n);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_laguerre_polynomial_l_x_scalar_out::call(x, n_meta, out_meta);
      }
      
      at::Tensor n_;
      if (at::functionalization::impl::isFunctionalTensor(n)) {
        at::functionalization::impl::sync(n);
        n_ = at::functionalization::impl::from_functional_tensor(n);
      } else {
        n_ = n;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || n.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(n))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_laguerre_polynomial_l_x_scalar_out::call(x, n_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_laguerre_polynomial_l_x_scalar::call(x, n_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_laguerre_polynomial_l_out_n_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto x_meta = to_meta(x);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_laguerre_polynomial_l_n_scalar_out::call(x_meta, n, out_meta);
      }
      
      at::Tensor x_;
      if (at::functionalization::impl::isFunctionalTensor(x)) {
        at::functionalization::impl::sync(x);
        x_ = at::functionalization::impl::from_functional_tensor(x);
      } else {
        x_ = x;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || x.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(x))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_laguerre_polynomial_l_n_scalar_out::call(x_, n, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_laguerre_polynomial_l_n_scalar::call(x_, n);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_legendre_polynomial_p_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto x_meta = to_meta(x);
        auto n_meta = to_meta(n);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_legendre_polynomial_p_out::call(x_meta, n_meta, out_meta);
      }
      
      at::Tensor x_;
      if (at::functionalization::impl::isFunctionalTensor(x)) {
        at::functionalization::impl::sync(x);
        x_ = at::functionalization::impl::from_functional_tensor(x);
      } else {
        x_ = x;
      }
      
      at::Tensor n_;
      if (at::functionalization::impl::isFunctionalTensor(n)) {
        at::functionalization::impl::sync(n);
        n_ = at::functionalization::impl::from_functional_tensor(n);
      } else {
        n_ = n;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || x.device().type() == c10::DeviceType::XLA || n.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(x) || at::functionalization::impl::isFunctionalTensor(n))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_legendre_polynomial_p_out::call(x_, n_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_legendre_polynomial_p::call(x_, n_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_legendre_polynomial_p_out_x_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto n_meta = to_meta(n);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_legendre_polynomial_p_x_scalar_out::call(x, n_meta, out_meta);
      }
      
      at::Tensor n_;
      if (at::functionalization::impl::isFunctionalTensor(n)) {
        at::functionalization::impl::sync(n);
        n_ = at::functionalization::impl::from_functional_tensor(n);
      } else {
        n_ = n;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || n.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(n))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_legendre_polynomial_p_x_scalar_out::call(x, n_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_legendre_polynomial_p_x_scalar::call(x, n_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_legendre_polynomial_p_out_n_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto x_meta = to_meta(x);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_legendre_polynomial_p_n_scalar_out::call(x_meta, n, out_meta);
      }
      
      at::Tensor x_;
      if (at::functionalization::impl::isFunctionalTensor(x)) {
        at::functionalization::impl::sync(x);
        x_ = at::functionalization::impl::from_functional_tensor(x);
      } else {
        x_ = x;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || x.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(x))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_legendre_polynomial_p_n_scalar_out::call(x_, n, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_legendre_polynomial_p_n_scalar::call(x_, n);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_modified_bessel_i0_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_modified_bessel_i0_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_modified_bessel_i0_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_modified_bessel_i0::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
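
    // Hedged note on the epilogue each functional branch above runs after the
    // compute: the inner (unwrapped) tensor is captured before and after the
    // update so that XLA-specific data can be carried from the old value to the
    // new one. The helper is illustrative only and mirrors the generated code.
    [[maybe_unused]] static void example_commit_and_propagate_xla(
        at::Tensor & out, const at::Tensor & tmp_output) {
      auto out_inner = at::functionalization::impl::from_functional_tensor(out);
      // Point the functional wrapper at the new value and record the mutation.
      at::functionalization::impl::replace_(out, tmp_output);
      at::functionalization::impl::commit_update(out);
      at::functionalization::impl::sync(out);
      auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
      at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
    }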

    at::Tensor & special_modified_bessel_i1_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_modified_bessel_i1_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_modified_bessel_i1_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_modified_bessel_i1::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_modified_bessel_k0_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_modified_bessel_k0_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_modified_bessel_k0_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_modified_bessel_k0::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_modified_bessel_k1_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_modified_bessel_k1_out::call(self_meta, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_modified_bessel_k1_out::call(self_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_modified_bessel_k1::call(self_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_scaled_modified_bessel_k0_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto x_meta = to_meta(x);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_scaled_modified_bessel_k0_out::call(x_meta, out_meta);
      }
      
      at::Tensor x_;
      if (at::functionalization::impl::isFunctionalTensor(x)) {
        at::functionalization::impl::sync(x);
        x_ = at::functionalization::impl::from_functional_tensor(x);
      } else {
        x_ = x;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || x.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(x))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_scaled_modified_bessel_k0_out::call(x_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_scaled_modified_bessel_k0::call(x_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_scaled_modified_bessel_k1_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto x_meta = to_meta(x);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_scaled_modified_bessel_k1_out::call(x_meta, out_meta);
      }
      
      at::Tensor x_;
      if (at::functionalization::impl::isFunctionalTensor(x)) {
        at::functionalization::impl::sync(x);
        x_ = at::functionalization::impl::from_functional_tensor(x);
      } else {
        x_ = x;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || x.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(x))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_scaled_modified_bessel_k1_out::call(x_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_scaled_modified_bessel_k1::call(x_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_shifted_chebyshev_polynomial_t_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto x_meta = to_meta(x);
        auto n_meta = to_meta(n);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_shifted_chebyshev_polynomial_t_out::call(x_meta, n_meta, out_meta);
      }
      
      at::Tensor x_;
      if (at::functionalization::impl::isFunctionalTensor(x)) {
        at::functionalization::impl::sync(x);
        x_ = at::functionalization::impl::from_functional_tensor(x);
      } else {
        x_ = x;
      }
      
      at::Tensor n_;
      if (at::functionalization::impl::isFunctionalTensor(n)) {
        at::functionalization::impl::sync(n);
        n_ = at::functionalization::impl::from_functional_tensor(n);
      } else {
        n_ = n;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || x.device().type() == c10::DeviceType::XLA || n.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(x) || at::functionalization::impl::isFunctionalTensor(n))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_shifted_chebyshev_polynomial_t_out::call(x_, n_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_shifted_chebyshev_polynomial_t::call(x_, n_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
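
    // Hedged sketch of the meta-reference pre-check that sits (currently guarded
    // off by the `false &&` condition) at the top of each wrapper: the op is run
    // on meta tensors first so shape and dtype errors surface before any real
    // computation. `to_meta` and `exclude_keys_for_meta_dispatch` are the helpers
    // defined earlier in this file; the function below is illustrative only.
    [[maybe_unused]] static void example_meta_reference_check(
        const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
      auto x_meta = to_meta(x);
      auto n_meta = to_meta(n);
      auto out_meta = to_meta(out);
      at::AutoDispatchSkipFunctionalize func_guard;
      c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
      at::_ops::special_shifted_chebyshev_polynomial_t_out::call(x_meta, n_meta, out_meta);
    }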

    at::Tensor & special_shifted_chebyshev_polynomial_t_out_x_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto n_meta = to_meta(n);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_shifted_chebyshev_polynomial_t_x_scalar_out::call(x, n_meta, out_meta);
      }
      
      at::Tensor n_;
      if (at::functionalization::impl::isFunctionalTensor(n)) {
        at::functionalization::impl::sync(n);
        n_ = at::functionalization::impl::from_functional_tensor(n);
      } else {
        n_ = n;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || n.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(n))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_shifted_chebyshev_polynomial_t_x_scalar_out::call(x, n_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_shifted_chebyshev_polynomial_t_x_scalar::call(x, n_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_shifted_chebyshev_polynomial_t_out_n_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto x_meta = to_meta(x);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_shifted_chebyshev_polynomial_t_n_scalar_out::call(x_meta, n, out_meta);
      }
      
      at::Tensor x_;
      if (at::functionalization::impl::isFunctionalTensor(x)) {
        at::functionalization::impl::sync(x);
        x_ = at::functionalization::impl::from_functional_tensor(x);
      } else {
        x_ = x;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || x.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(x))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_shifted_chebyshev_polynomial_t_n_scalar_out::call(x_, n, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_shifted_chebyshev_polynomial_t_n_scalar::call(x_, n);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_shifted_chebyshev_polynomial_u_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto x_meta = to_meta(x);
        auto n_meta = to_meta(n);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_shifted_chebyshev_polynomial_u_out::call(x_meta, n_meta, out_meta);
      }
      
      at::Tensor x_;
      if (at::functionalization::impl::isFunctionalTensor(x)) {
        at::functionalization::impl::sync(x);
        x_ = at::functionalization::impl::from_functional_tensor(x);
      } else {
        x_ = x;
      }
      
      at::Tensor n_;
      if (at::functionalization::impl::isFunctionalTensor(n)) {
        at::functionalization::impl::sync(n);
        n_ = at::functionalization::impl::from_functional_tensor(n);
      } else {
        n_ = n;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || x.device().type() == c10::DeviceType::XLA || n.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(x) || at::functionalization::impl::isFunctionalTensor(n))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_shifted_chebyshev_polynomial_u_out::call(x_, n_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_shifted_chebyshev_polynomial_u::call(x_, n_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_shifted_chebyshev_polynomial_u_out_x_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto n_meta = to_meta(n);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_shifted_chebyshev_polynomial_u_x_scalar_out::call(x, n_meta, out_meta);
      }
      
      at::Tensor n_;
      if (at::functionalization::impl::isFunctionalTensor(n)) {
        at::functionalization::impl::sync(n);
        n_ = at::functionalization::impl::from_functional_tensor(n);
      } else {
        n_ = n;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || n.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(n))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_shifted_chebyshev_polynomial_u_x_scalar_out::call(x, n_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_shifted_chebyshev_polynomial_u_x_scalar::call(x, n_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_shifted_chebyshev_polynomial_u_out_n_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto x_meta = to_meta(x);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_shifted_chebyshev_polynomial_u_n_scalar_out::call(x_meta, n, out_meta);
      }
      
      at::Tensor x_;
      if (at::functionalization::impl::isFunctionalTensor(x)) {
        at::functionalization::impl::sync(x);
        x_ = at::functionalization::impl::from_functional_tensor(x);
      } else {
        x_ = x;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || x.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(x))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_shifted_chebyshev_polynomial_u_n_scalar_out::call(x_, n, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_shifted_chebyshev_polynomial_u_n_scalar::call(x_, n);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_shifted_chebyshev_polynomial_v_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto x_meta = to_meta(x);
        auto n_meta = to_meta(n);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_shifted_chebyshev_polynomial_v_out::call(x_meta, n_meta, out_meta);
      }
      
      at::Tensor x_;
      if (at::functionalization::impl::isFunctionalTensor(x)) {
        at::functionalization::impl::sync(x);
        x_ = at::functionalization::impl::from_functional_tensor(x);
      } else {
        x_ = x;
      }
      
      at::Tensor n_;
      if (at::functionalization::impl::isFunctionalTensor(n)) {
        at::functionalization::impl::sync(n);
        n_ = at::functionalization::impl::from_functional_tensor(n);
      } else {
        n_ = n;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || x.device().type() == c10::DeviceType::XLA || n.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(x) || at::functionalization::impl::isFunctionalTensor(n))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_shifted_chebyshev_polynomial_v_out::call(x_, n_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_shifted_chebyshev_polynomial_v::call(x_, n_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_shifted_chebyshev_polynomial_v_out_x_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto n_meta = to_meta(n);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_shifted_chebyshev_polynomial_v_x_scalar_out::call(x, n_meta, out_meta);
      }
      
      at::Tensor n_;
      if (at::functionalization::impl::isFunctionalTensor(n)) {
        at::functionalization::impl::sync(n);
        n_ = at::functionalization::impl::from_functional_tensor(n);
      } else {
        n_ = n;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || n.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(n))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_shifted_chebyshev_polynomial_v_x_scalar_out::call(x, n_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_shifted_chebyshev_polynomial_v_x_scalar::call(x, n_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_shifted_chebyshev_polynomial_v_out_n_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto x_meta = to_meta(x);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_shifted_chebyshev_polynomial_v_n_scalar_out::call(x_meta, n, out_meta);
      }
      
      at::Tensor x_;
      if (at::functionalization::impl::isFunctionalTensor(x)) {
        at::functionalization::impl::sync(x);
        x_ = at::functionalization::impl::from_functional_tensor(x);
      } else {
        x_ = x;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || x.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(x))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_shifted_chebyshev_polynomial_v_n_scalar_out::call(x_, n, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_shifted_chebyshev_polynomial_v_n_scalar::call(x_, n);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
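
    // NOTE [Editor's illustrative sketch - not generated by torchgen]
    // Each polynomial op gets three nearly identical kernels, one per overload
    // (Tensor/Tensor, Scalar x, Scalar n). Scalar arguments never need unwrapping,
    // so they are forwarded untouched to both the meta probe and the functional
    // call. Condensed from the n_scalar kernel above:
    //
    //   at::Tensor tmp;
    //   {
    //     at::AutoDispatchSkipFunctionalize guard;
    //     // `n` is a Scalar and is passed through as-is; only `x` was unwrapped.
    //     tmp = at::_ops::special_shifted_chebyshev_polynomial_v_n_scalar::call(x_, n);
    //   }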

    at::Tensor & special_shifted_chebyshev_polynomial_w_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto x_meta = to_meta(x);
        auto n_meta = to_meta(n);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_shifted_chebyshev_polynomial_w_out::call(x_meta, n_meta, out_meta);
      }
      
      at::Tensor x_;
      if (at::functionalization::impl::isFunctionalTensor(x)) {
        at::functionalization::impl::sync(x);
        x_ = at::functionalization::impl::from_functional_tensor(x);
      } else {
        x_ = x;
      }
      
      at::Tensor n_;
      if (at::functionalization::impl::isFunctionalTensor(n)) {
        at::functionalization::impl::sync(n);
        n_ = at::functionalization::impl::from_functional_tensor(n);
      } else {
        n_ = n;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || x.device().type() == c10::DeviceType::XLA || n.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(x) || at::functionalization::impl::isFunctionalTensor(n))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_shifted_chebyshev_polynomial_w_out::call(x_, n_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_shifted_chebyshev_polynomial_w::call(x_, n_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_shifted_chebyshev_polynomial_w_out_x_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto n_meta = to_meta(n);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_shifted_chebyshev_polynomial_w_x_scalar_out::call(x, n_meta, out_meta);
      }
      
      at::Tensor n_;
      if (at::functionalization::impl::isFunctionalTensor(n)) {
        at::functionalization::impl::sync(n);
        n_ = at::functionalization::impl::from_functional_tensor(n);
      } else {
        n_ = n;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || n.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(n))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_shifted_chebyshev_polynomial_w_x_scalar_out::call(x, n_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_shifted_chebyshev_polynomial_w_x_scalar::call(x, n_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_shifted_chebyshev_polynomial_w_out_n_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto x_meta = to_meta(x);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_shifted_chebyshev_polynomial_w_n_scalar_out::call(x_meta, n, out_meta);
      }
      
      at::Tensor x_;
      if (at::functionalization::impl::isFunctionalTensor(x)) {
        at::functionalization::impl::sync(x);
        x_ = at::functionalization::impl::from_functional_tensor(x);
      } else {
        x_ = x;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || x.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(x))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_shifted_chebyshev_polynomial_w_n_scalar_out::call(x_, n, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_shifted_chebyshev_polynomial_w_n_scalar::call(x_, n);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    at::Tensor & special_spherical_bessel_j0_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto x_meta = to_meta(x);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::special_spherical_bessel_j0_out::call(x_meta, out_meta);
      }
      
      at::Tensor x_;
      if (at::functionalization::impl::isFunctionalTensor(x)) {
        at::functionalization::impl::sync(x);
        x_ = at::functionalization::impl::from_functional_tensor(x);
      } else {
        x_ = x;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || x.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(x))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::special_spherical_bessel_j0_out::call(x_, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::special_spherical_bessel_j0::call(x_);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }
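
    // NOTE [Editor's illustrative sketch - not generated by torchgen]
    // When `out` is not a functional tensor, the kernels in this file distinguish
    // two situations. If some *input* is functional and no XLA tensor is involved
    // (cpu_tensor.copy_(xla_tensor) is legitimate, so XLA relaxes the check), the
    // call would leak functionalized data into a plain tensor, which is rejected.
    // Otherwise nothing relevant is functional, so the kernel simply redispatches
    // the original mutable op below the Functionalize key. Condensed, using the
    // unwrapped `x_`/`out_` from the kernel above:
    //
    //   if (!at::functionalization::impl::isFunctionalTensor(out)) {
    //     bool functional_input = at::functionalization::impl::isFunctionalTensor(x);
    //     bool xla_input = x.device().type() == c10::DeviceType::XLA;
    //     if (functional_input && !xla_input) {
    //       TORCH_INTERNAL_ASSERT(false, "mutating a non-functional tensor with a functional tensor is not allowed.");
    //     } else {
    //       at::AutoDispatchSkipFunctionalize guard;
    //       at::_ops::special_spherical_bessel_j0_out::call(x_, out_);  // plain redispatch
    //     }
    //   }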

    at::Tensor & _foobar_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool arg1, bool arg2, bool arg3, at::Tensor & out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_foobar_out::call(self_meta, arg1, arg2, arg3, out_meta);
      }
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || self.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::Tensor tmp_output = at::_ops::_foobar_out::call(self_, arg1, arg2, arg3, out_);
         return out;
        }
      } else {
        at::Tensor tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_foobar::call(self_, arg1, arg2, arg3);
        }
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, tmp_output);
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
        return out;
      }
    }

    void _fused_adam_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto grads_meta = to_meta(grads);
        auto exp_avgs_meta = to_meta(exp_avgs);
        auto exp_avg_sqs_meta = to_meta(exp_avg_sqs);
        auto max_exp_avg_sqs_meta = to_meta(max_exp_avg_sqs);
        auto state_steps_meta = to_meta(state_steps);
        auto grad_scale_meta = to_meta(grad_scale);
        auto found_inf_meta = to_meta(found_inf);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_fused_adam_out::call(self_meta, grads_meta, exp_avgs_meta, exp_avg_sqs_meta, max_exp_avg_sqs_meta, state_steps_meta, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_meta, found_inf_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> grads_;
      if (at::functionalization::impl::isFunctionalTensor(grads)) {
        at::functionalization::impl::sync(grads);
        grads_ = at::functionalization::impl::from_functional_tensor(grads);
      } else {
        grads_ = grads.vec();
      }
      
      ::std::vector<at::Tensor> exp_avgs_;
      if (at::functionalization::impl::isFunctionalTensor(exp_avgs)) {
        at::functionalization::impl::sync(exp_avgs);
        exp_avgs_ = at::functionalization::impl::from_functional_tensor(exp_avgs);
      } else {
        exp_avgs_ = exp_avgs.vec();
      }
      
      ::std::vector<at::Tensor> exp_avg_sqs_;
      if (at::functionalization::impl::isFunctionalTensor(exp_avg_sqs)) {
        at::functionalization::impl::sync(exp_avg_sqs);
        exp_avg_sqs_ = at::functionalization::impl::from_functional_tensor(exp_avg_sqs);
      } else {
        exp_avg_sqs_ = exp_avg_sqs.vec();
      }
      
      ::std::vector<at::Tensor> max_exp_avg_sqs_;
      if (at::functionalization::impl::isFunctionalTensor(max_exp_avg_sqs)) {
        at::functionalization::impl::sync(max_exp_avg_sqs);
        max_exp_avg_sqs_ = at::functionalization::impl::from_functional_tensor(max_exp_avg_sqs);
      } else {
        max_exp_avg_sqs_ = max_exp_avg_sqs.vec();
      }
      
      ::std::vector<at::Tensor> state_steps_;
      if (at::functionalization::impl::isFunctionalTensor(state_steps)) {
        at::functionalization::impl::sync(state_steps);
        state_steps_ = at::functionalization::impl::from_functional_tensor(state_steps);
      } else {
        state_steps_ = state_steps.vec();
      }
      
      ::std::optional<at::Tensor> grad_scale_;
      if (at::functionalization::impl::isFunctionalTensor(grad_scale)) {
        at::functionalization::impl::sync(grad_scale);
        grad_scale_ = at::functionalization::impl::from_functional_tensor(grad_scale);
      } else {
        grad_scale_ = grad_scale;
      }
      
      ::std::optional<at::Tensor> found_inf_;
      if (at::functionalization::impl::isFunctionalTensor(found_inf)) {
        at::functionalization::impl::sync(found_inf);
        found_inf_ = at::functionalization::impl::from_functional_tensor(found_inf);
      } else {
        found_inf_ = found_inf;
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grads) && at::functionalization::impl::isFunctionalTensor(exp_avgs) && at::functionalization::impl::isFunctionalTensor(exp_avg_sqs) && at::functionalization::impl::isFunctionalTensor(max_exp_avg_sqs) && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(state_steps) || at::functionalization::impl::isFunctionalTensor(grad_scale) || at::functionalization::impl::isFunctionalTensor(found_inf))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_fused_adam_out::call(self_, grads_, exp_avgs_, exp_avg_sqs_, max_exp_avg_sqs_, state_steps_, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_, found_inf_, out_);
         
        }
      } else {
        ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_fused_adam::call(self_, grads_, exp_avgs_, exp_avg_sqs_, max_exp_avg_sqs_, state_steps_, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_, found_inf_);
        }
        auto grads_inner = at::functionalization::impl::from_functional_tensor(grads);
        at::functionalization::impl::replace_(grads, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(grads);
        at::functionalization::impl::sync(grads);
        auto grads_inner_updated = at::functionalization::impl::from_functional_tensor(grads);
        at::functionalization::impl::propagate_xla_data_direct(grads_inner, grads_inner_updated);
        auto exp_avgs_inner = at::functionalization::impl::from_functional_tensor(exp_avgs);
        at::functionalization::impl::replace_(exp_avgs, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(exp_avgs);
        at::functionalization::impl::sync(exp_avgs);
        auto exp_avgs_inner_updated = at::functionalization::impl::from_functional_tensor(exp_avgs);
        at::functionalization::impl::propagate_xla_data_direct(exp_avgs_inner, exp_avgs_inner_updated);
        auto exp_avg_sqs_inner = at::functionalization::impl::from_functional_tensor(exp_avg_sqs);
        at::functionalization::impl::replace_(exp_avg_sqs, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(exp_avg_sqs);
        at::functionalization::impl::sync(exp_avg_sqs);
        auto exp_avg_sqs_inner_updated = at::functionalization::impl::from_functional_tensor(exp_avg_sqs);
        at::functionalization::impl::propagate_xla_data_direct(exp_avg_sqs_inner, exp_avg_sqs_inner_updated);
        auto max_exp_avg_sqs_inner = at::functionalization::impl::from_functional_tensor(max_exp_avg_sqs);
        at::functionalization::impl::replace_(max_exp_avg_sqs, std::get<3>(tmp_output));
        at::functionalization::impl::commit_update(max_exp_avg_sqs);
        at::functionalization::impl::sync(max_exp_avg_sqs);
        auto max_exp_avg_sqs_inner_updated = at::functionalization::impl::from_functional_tensor(max_exp_avg_sqs);
        at::functionalization::impl::propagate_xla_data_direct(max_exp_avg_sqs_inner, max_exp_avg_sqs_inner_updated);
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, std::get<4>(tmp_output));
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }
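
    // NOTE [Editor's illustrative sketch - not generated by torchgen]
    // For ops like _fused_adam.out that mutate several TensorLists at once, the
    // functional variant returns one ::std::vector<at::Tensor> per mutated list,
    // and the kernel above writes each vector back into its wrapper with the same
    // replace_/commit_update/sync sequence used for single tensors. The write-back
    // could be factored as a small helper along these lines (hypothetical helper,
    // not part of the generated file):
    //
    //   void write_back_list(at::TensorList wrapper, const ::std::vector<at::Tensor>& fresh) {
    //     at::functionalization::impl::replace_(wrapper, fresh);
    //     at::functionalization::impl::commit_update(wrapper);
    //     at::functionalization::impl::sync(wrapper);
    //   }
    //
    //   // ...then, given the 5-tuple `tmp_output` returned by _fused_adam:
    //   write_back_list(grads,           std::get<0>(tmp_output));
    //   write_back_list(exp_avgs,        std::get<1>(tmp_output));
    //   write_back_list(exp_avg_sqs,     std::get<2>(tmp_output));
    //   write_back_list(max_exp_avg_sqs, std::get<3>(tmp_output));
    //   write_back_list(out,             std::get<4>(tmp_output));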

    void _fused_adam_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto grads_meta = to_meta(grads);
        auto exp_avgs_meta = to_meta(exp_avgs);
        auto exp_avg_sqs_meta = to_meta(exp_avg_sqs);
        auto max_exp_avg_sqs_meta = to_meta(max_exp_avg_sqs);
        auto state_steps_meta = to_meta(state_steps);
        auto grad_scale_meta = to_meta(grad_scale);
        auto found_inf_meta = to_meta(found_inf);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_fused_adam_::call(self_meta, grads_meta, exp_avgs_meta, exp_avg_sqs_meta, max_exp_avg_sqs_meta, state_steps_meta, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_meta, found_inf_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> grads_;
      if (at::functionalization::impl::isFunctionalTensor(grads)) {
        at::functionalization::impl::sync(grads);
        grads_ = at::functionalization::impl::from_functional_tensor(grads);
      } else {
        grads_ = grads.vec();
      }
      
      ::std::vector<at::Tensor> exp_avgs_;
      if (at::functionalization::impl::isFunctionalTensor(exp_avgs)) {
        at::functionalization::impl::sync(exp_avgs);
        exp_avgs_ = at::functionalization::impl::from_functional_tensor(exp_avgs);
      } else {
        exp_avgs_ = exp_avgs.vec();
      }
      
      ::std::vector<at::Tensor> exp_avg_sqs_;
      if (at::functionalization::impl::isFunctionalTensor(exp_avg_sqs)) {
        at::functionalization::impl::sync(exp_avg_sqs);
        exp_avg_sqs_ = at::functionalization::impl::from_functional_tensor(exp_avg_sqs);
      } else {
        exp_avg_sqs_ = exp_avg_sqs.vec();
      }
      
      ::std::vector<at::Tensor> max_exp_avg_sqs_;
      if (at::functionalization::impl::isFunctionalTensor(max_exp_avg_sqs)) {
        at::functionalization::impl::sync(max_exp_avg_sqs);
        max_exp_avg_sqs_ = at::functionalization::impl::from_functional_tensor(max_exp_avg_sqs);
      } else {
        max_exp_avg_sqs_ = max_exp_avg_sqs.vec();
      }
      
      ::std::vector<at::Tensor> state_steps_;
      if (at::functionalization::impl::isFunctionalTensor(state_steps)) {
        at::functionalization::impl::sync(state_steps);
        state_steps_ = at::functionalization::impl::from_functional_tensor(state_steps);
      } else {
        state_steps_ = state_steps.vec();
      }
      
      ::std::optional<at::Tensor> grad_scale_;
      if (at::functionalization::impl::isFunctionalTensor(grad_scale)) {
        at::functionalization::impl::sync(grad_scale);
        grad_scale_ = at::functionalization::impl::from_functional_tensor(grad_scale);
      } else {
        grad_scale_ = grad_scale;
      }
      
      ::std::optional<at::Tensor> found_inf_;
      if (at::functionalization::impl::isFunctionalTensor(found_inf)) {
        at::functionalization::impl::sync(found_inf);
        found_inf_ = at::functionalization::impl::from_functional_tensor(found_inf);
      } else {
        found_inf_ = found_inf;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self) && at::functionalization::impl::isFunctionalTensor(grads) && at::functionalization::impl::isFunctionalTensor(exp_avgs) && at::functionalization::impl::isFunctionalTensor(exp_avg_sqs) && at::functionalization::impl::isFunctionalTensor(max_exp_avg_sqs))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(state_steps) || at::functionalization::impl::isFunctionalTensor(grad_scale) || at::functionalization::impl::isFunctionalTensor(found_inf))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_fused_adam_::call(self_, grads_, exp_avgs_, exp_avg_sqs_, max_exp_avg_sqs_, state_steps_, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_, found_inf_);
         
        }
      } else {
        ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_fused_adam::call(self_, grads_, exp_avgs_, exp_avg_sqs_, max_exp_avg_sqs_, state_steps_, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_, found_inf_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        auto grads_inner = at::functionalization::impl::from_functional_tensor(grads);
        at::functionalization::impl::replace_(grads, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(grads);
        at::functionalization::impl::sync(grads);
        auto grads_inner_updated = at::functionalization::impl::from_functional_tensor(grads);
        at::functionalization::impl::propagate_xla_data_direct(grads_inner, grads_inner_updated);
        auto exp_avgs_inner = at::functionalization::impl::from_functional_tensor(exp_avgs);
        at::functionalization::impl::replace_(exp_avgs, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(exp_avgs);
        at::functionalization::impl::sync(exp_avgs);
        auto exp_avgs_inner_updated = at::functionalization::impl::from_functional_tensor(exp_avgs);
        at::functionalization::impl::propagate_xla_data_direct(exp_avgs_inner, exp_avgs_inner_updated);
        auto exp_avg_sqs_inner = at::functionalization::impl::from_functional_tensor(exp_avg_sqs);
        at::functionalization::impl::replace_(exp_avg_sqs, std::get<3>(tmp_output));
        at::functionalization::impl::commit_update(exp_avg_sqs);
        at::functionalization::impl::sync(exp_avg_sqs);
        auto exp_avg_sqs_inner_updated = at::functionalization::impl::from_functional_tensor(exp_avg_sqs);
        at::functionalization::impl::propagate_xla_data_direct(exp_avg_sqs_inner, exp_avg_sqs_inner_updated);
        auto max_exp_avg_sqs_inner = at::functionalization::impl::from_functional_tensor(max_exp_avg_sqs);
        at::functionalization::impl::replace_(max_exp_avg_sqs, std::get<4>(tmp_output));
        at::functionalization::impl::commit_update(max_exp_avg_sqs);
        at::functionalization::impl::sync(max_exp_avg_sqs);
        auto max_exp_avg_sqs_inner_updated = at::functionalization::impl::from_functional_tensor(max_exp_avg_sqs);
        at::functionalization::impl::propagate_xla_data_direct(max_exp_avg_sqs_inner, max_exp_avg_sqs_inner_updated);
      }
    }
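
    // NOTE [Editor's illustrative sketch - not generated by torchgen]
    // For true in-place ops such as _fused_adam_, the meta-reference block at the
    // top of the kernel is active (its condition starts with `true`), while the
    // out= variants keep it compiled out (`false && ...`). The point is to re-run
    // the original mutable op on meta tensors first, so shape and dtype errors
    // that only the in-place schema would raise are still reported after the op
    // is rewritten to its functional form. Schematically (the remaining arguments
    // are elided here; the full call appears in the kernel above):
    //
    //   auto self_meta = to_meta(self);                 // meta copies carry only metadata
    //   at::AutoDispatchSkipFunctionalize func_guard;   // do not functionalize the probe
    //   c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
    //   at::_ops::_fused_adam_::call(self_meta, /* ...remaining meta arguments... */);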

    void _fused_adam_out_tensor_lr_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto grads_meta = to_meta(grads);
        auto exp_avgs_meta = to_meta(exp_avgs);
        auto exp_avg_sqs_meta = to_meta(exp_avg_sqs);
        auto max_exp_avg_sqs_meta = to_meta(max_exp_avg_sqs);
        auto state_steps_meta = to_meta(state_steps);
        auto lr_meta = to_meta(lr);
        auto grad_scale_meta = to_meta(grad_scale);
        auto found_inf_meta = to_meta(found_inf);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_fused_adam_tensor_lr_out::call(self_meta, grads_meta, exp_avgs_meta, exp_avg_sqs_meta, max_exp_avg_sqs_meta, state_steps_meta, lr_meta, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_meta, found_inf_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> grads_;
      if (at::functionalization::impl::isFunctionalTensor(grads)) {
        at::functionalization::impl::sync(grads);
        grads_ = at::functionalization::impl::from_functional_tensor(grads);
      } else {
        grads_ = grads.vec();
      }
      
      ::std::vector<at::Tensor> exp_avgs_;
      if (at::functionalization::impl::isFunctionalTensor(exp_avgs)) {
        at::functionalization::impl::sync(exp_avgs);
        exp_avgs_ = at::functionalization::impl::from_functional_tensor(exp_avgs);
      } else {
        exp_avgs_ = exp_avgs.vec();
      }
      
      ::std::vector<at::Tensor> exp_avg_sqs_;
      if (at::functionalization::impl::isFunctionalTensor(exp_avg_sqs)) {
        at::functionalization::impl::sync(exp_avg_sqs);
        exp_avg_sqs_ = at::functionalization::impl::from_functional_tensor(exp_avg_sqs);
      } else {
        exp_avg_sqs_ = exp_avg_sqs.vec();
      }
      
      ::std::vector<at::Tensor> max_exp_avg_sqs_;
      if (at::functionalization::impl::isFunctionalTensor(max_exp_avg_sqs)) {
        at::functionalization::impl::sync(max_exp_avg_sqs);
        max_exp_avg_sqs_ = at::functionalization::impl::from_functional_tensor(max_exp_avg_sqs);
      } else {
        max_exp_avg_sqs_ = max_exp_avg_sqs.vec();
      }
      
      ::std::vector<at::Tensor> state_steps_;
      if (at::functionalization::impl::isFunctionalTensor(state_steps)) {
        at::functionalization::impl::sync(state_steps);
        state_steps_ = at::functionalization::impl::from_functional_tensor(state_steps);
      } else {
        state_steps_ = state_steps.vec();
      }
      
      at::Tensor lr_;
      if (at::functionalization::impl::isFunctionalTensor(lr)) {
        at::functionalization::impl::sync(lr);
        lr_ = at::functionalization::impl::from_functional_tensor(lr);
      } else {
        lr_ = lr;
      }
      
      ::std::optional<at::Tensor> grad_scale_;
      if (at::functionalization::impl::isFunctionalTensor(grad_scale)) {
        at::functionalization::impl::sync(grad_scale);
        grad_scale_ = at::functionalization::impl::from_functional_tensor(grad_scale);
      } else {
        grad_scale_ = grad_scale;
      }
      
      ::std::optional<at::Tensor> found_inf_;
      if (at::functionalization::impl::isFunctionalTensor(found_inf)) {
        at::functionalization::impl::sync(found_inf);
        found_inf_ = at::functionalization::impl::from_functional_tensor(found_inf);
      } else {
        found_inf_ = found_inf;
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grads) && at::functionalization::impl::isFunctionalTensor(exp_avgs) && at::functionalization::impl::isFunctionalTensor(exp_avg_sqs) && at::functionalization::impl::isFunctionalTensor(max_exp_avg_sqs) && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || lr.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(state_steps) || at::functionalization::impl::isFunctionalTensor(lr) || at::functionalization::impl::isFunctionalTensor(grad_scale) || at::functionalization::impl::isFunctionalTensor(found_inf))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_fused_adam_tensor_lr_out::call(self_, grads_, exp_avgs_, exp_avg_sqs_, max_exp_avg_sqs_, state_steps_, lr_, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_, found_inf_, out_);
         
        }
      } else {
        ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_fused_adam_tensor_lr::call(self_, grads_, exp_avgs_, exp_avg_sqs_, max_exp_avg_sqs_, state_steps_, lr_, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_, found_inf_);
        }
        auto grads_inner = at::functionalization::impl::from_functional_tensor(grads);
        at::functionalization::impl::replace_(grads, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(grads);
        at::functionalization::impl::sync(grads);
        auto grads_inner_updated = at::functionalization::impl::from_functional_tensor(grads);
        at::functionalization::impl::propagate_xla_data_direct(grads_inner, grads_inner_updated);
        auto exp_avgs_inner = at::functionalization::impl::from_functional_tensor(exp_avgs);
        at::functionalization::impl::replace_(exp_avgs, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(exp_avgs);
        at::functionalization::impl::sync(exp_avgs);
        auto exp_avgs_inner_updated = at::functionalization::impl::from_functional_tensor(exp_avgs);
        at::functionalization::impl::propagate_xla_data_direct(exp_avgs_inner, exp_avgs_inner_updated);
        auto exp_avg_sqs_inner = at::functionalization::impl::from_functional_tensor(exp_avg_sqs);
        at::functionalization::impl::replace_(exp_avg_sqs, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(exp_avg_sqs);
        at::functionalization::impl::sync(exp_avg_sqs);
        auto exp_avg_sqs_inner_updated = at::functionalization::impl::from_functional_tensor(exp_avg_sqs);
        at::functionalization::impl::propagate_xla_data_direct(exp_avg_sqs_inner, exp_avg_sqs_inner_updated);
        auto max_exp_avg_sqs_inner = at::functionalization::impl::from_functional_tensor(max_exp_avg_sqs);
        at::functionalization::impl::replace_(max_exp_avg_sqs, std::get<3>(tmp_output));
        at::functionalization::impl::commit_update(max_exp_avg_sqs);
        at::functionalization::impl::sync(max_exp_avg_sqs);
        auto max_exp_avg_sqs_inner_updated = at::functionalization::impl::from_functional_tensor(max_exp_avg_sqs);
        at::functionalization::impl::propagate_xla_data_direct(max_exp_avg_sqs_inner, max_exp_avg_sqs_inner_updated);
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, std::get<4>(tmp_output));
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _fused_adam__tensor_lr(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto grads_meta = to_meta(grads);
        auto exp_avgs_meta = to_meta(exp_avgs);
        auto exp_avg_sqs_meta = to_meta(exp_avg_sqs);
        auto max_exp_avg_sqs_meta = to_meta(max_exp_avg_sqs);
        auto state_steps_meta = to_meta(state_steps);
        auto lr_meta = to_meta(lr);
        auto grad_scale_meta = to_meta(grad_scale);
        auto found_inf_meta = to_meta(found_inf);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_fused_adam__tensor_lr::call(self_meta, grads_meta, exp_avgs_meta, exp_avg_sqs_meta, max_exp_avg_sqs_meta, state_steps_meta, lr_meta, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_meta, found_inf_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> grads_;
      if (at::functionalization::impl::isFunctionalTensor(grads)) {
        at::functionalization::impl::sync(grads);
        grads_ = at::functionalization::impl::from_functional_tensor(grads);
      } else {
        grads_ = grads.vec();
      }
      
      ::std::vector<at::Tensor> exp_avgs_;
      if (at::functionalization::impl::isFunctionalTensor(exp_avgs)) {
        at::functionalization::impl::sync(exp_avgs);
        exp_avgs_ = at::functionalization::impl::from_functional_tensor(exp_avgs);
      } else {
        exp_avgs_ = exp_avgs.vec();
      }
      
      ::std::vector<at::Tensor> exp_avg_sqs_;
      if (at::functionalization::impl::isFunctionalTensor(exp_avg_sqs)) {
        at::functionalization::impl::sync(exp_avg_sqs);
        exp_avg_sqs_ = at::functionalization::impl::from_functional_tensor(exp_avg_sqs);
      } else {
        exp_avg_sqs_ = exp_avg_sqs.vec();
      }
      
      ::std::vector<at::Tensor> max_exp_avg_sqs_;
      if (at::functionalization::impl::isFunctionalTensor(max_exp_avg_sqs)) {
        at::functionalization::impl::sync(max_exp_avg_sqs);
        max_exp_avg_sqs_ = at::functionalization::impl::from_functional_tensor(max_exp_avg_sqs);
      } else {
        max_exp_avg_sqs_ = max_exp_avg_sqs.vec();
      }
      
      ::std::vector<at::Tensor> state_steps_;
      if (at::functionalization::impl::isFunctionalTensor(state_steps)) {
        at::functionalization::impl::sync(state_steps);
        state_steps_ = at::functionalization::impl::from_functional_tensor(state_steps);
      } else {
        state_steps_ = state_steps.vec();
      }
      
      at::Tensor lr_;
      if (at::functionalization::impl::isFunctionalTensor(lr)) {
        at::functionalization::impl::sync(lr);
        lr_ = at::functionalization::impl::from_functional_tensor(lr);
      } else {
        lr_ = lr;
      }
      
      ::std::optional<at::Tensor> grad_scale_;
      if (at::functionalization::impl::isFunctionalTensor(grad_scale)) {
        at::functionalization::impl::sync(grad_scale);
        grad_scale_ = at::functionalization::impl::from_functional_tensor(grad_scale);
      } else {
        grad_scale_ = grad_scale;
      }
      
      ::std::optional<at::Tensor> found_inf_;
      if (at::functionalization::impl::isFunctionalTensor(found_inf)) {
        at::functionalization::impl::sync(found_inf);
        found_inf_ = at::functionalization::impl::from_functional_tensor(found_inf);
      } else {
        found_inf_ = found_inf;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self) && at::functionalization::impl::isFunctionalTensor(grads) && at::functionalization::impl::isFunctionalTensor(exp_avgs) && at::functionalization::impl::isFunctionalTensor(exp_avg_sqs) && at::functionalization::impl::isFunctionalTensor(max_exp_avg_sqs))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || lr.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(state_steps) || at::functionalization::impl::isFunctionalTensor(lr) || at::functionalization::impl::isFunctionalTensor(grad_scale) || at::functionalization::impl::isFunctionalTensor(found_inf))) {
         // case 1: trying to mutate a non functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_fused_adam__tensor_lr::call(self_, grads_, exp_avgs_, exp_avg_sqs_, max_exp_avg_sqs_, state_steps_, lr_, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_, found_inf_);
         
        }
      } else {
        ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_fused_adam_tensor_lr::call(self_, grads_, exp_avgs_, exp_avg_sqs_, max_exp_avg_sqs_, state_steps_, lr_, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_, found_inf_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        auto grads_inner = at::functionalization::impl::from_functional_tensor(grads);
        at::functionalization::impl::replace_(grads, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(grads);
        at::functionalization::impl::sync(grads);
        auto grads_inner_updated = at::functionalization::impl::from_functional_tensor(grads);
        at::functionalization::impl::propagate_xla_data_direct(grads_inner, grads_inner_updated);
        auto exp_avgs_inner = at::functionalization::impl::from_functional_tensor(exp_avgs);
        at::functionalization::impl::replace_(exp_avgs, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(exp_avgs);
        at::functionalization::impl::sync(exp_avgs);
        auto exp_avgs_inner_updated = at::functionalization::impl::from_functional_tensor(exp_avgs);
        at::functionalization::impl::propagate_xla_data_direct(exp_avgs_inner, exp_avgs_inner_updated);
        auto exp_avg_sqs_inner = at::functionalization::impl::from_functional_tensor(exp_avg_sqs);
        at::functionalization::impl::replace_(exp_avg_sqs, std::get<3>(tmp_output));
        at::functionalization::impl::commit_update(exp_avg_sqs);
        at::functionalization::impl::sync(exp_avg_sqs);
        auto exp_avg_sqs_inner_updated = at::functionalization::impl::from_functional_tensor(exp_avg_sqs);
        at::functionalization::impl::propagate_xla_data_direct(exp_avg_sqs_inner, exp_avg_sqs_inner_updated);
        auto max_exp_avg_sqs_inner = at::functionalization::impl::from_functional_tensor(max_exp_avg_sqs);
        at::functionalization::impl::replace_(max_exp_avg_sqs, std::get<4>(tmp_output));
        at::functionalization::impl::commit_update(max_exp_avg_sqs);
        at::functionalization::impl::sync(max_exp_avg_sqs);
        auto max_exp_avg_sqs_inner_updated = at::functionalization::impl::from_functional_tensor(max_exp_avg_sqs);
        at::functionalization::impl::propagate_xla_data_direct(max_exp_avg_sqs_inner, max_exp_avg_sqs_inner_updated);
      }
    }
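
    // In outline, each fused-optimizer kernel in this block does the same four
    // things: (1) optionally replay the call on meta tensors to surface shape
    // errors early, (2) sync() and unwrap every functional tensor argument,
    // (3) if the mutated arguments are not functional tensors, either raise
    // (when a functional input would otherwise mutate a non-functional one) or
    // simply redispatch to the original mutable op, and (4) otherwise call the
    // functional variant of the op under AutoDispatchSkipFunctionalize, commit
    // each result back into its argument via replace_()/commit_update()/sync(),
    // and hand the old and new inner tensors to propagate_xla_data_direct() so
    // any XLA-side data follows the update.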

    void _fused_adamw_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This helps us catch shape errors that apply to inplace ops but not to their functional variants.
        // (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
        auto self_meta = to_meta(self);
        auto grads_meta = to_meta(grads);
        auto exp_avgs_meta = to_meta(exp_avgs);
        auto exp_avg_sqs_meta = to_meta(exp_avg_sqs);
        auto max_exp_avg_sqs_meta = to_meta(max_exp_avg_sqs);
        auto state_steps_meta = to_meta(state_steps);
        auto grad_scale_meta = to_meta(grad_scale);
        auto found_inf_meta = to_meta(found_inf);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_fused_adamw_out::call(self_meta, grads_meta, exp_avgs_meta, exp_avg_sqs_meta, max_exp_avg_sqs_meta, state_steps_meta, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_meta, found_inf_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> grads_;
      if (at::functionalization::impl::isFunctionalTensor(grads)) {
        at::functionalization::impl::sync(grads);
        grads_ = at::functionalization::impl::from_functional_tensor(grads);
      } else {
        grads_ = grads.vec();
      }
      
      ::std::vector<at::Tensor> exp_avgs_;
      if (at::functionalization::impl::isFunctionalTensor(exp_avgs)) {
        at::functionalization::impl::sync(exp_avgs);
        exp_avgs_ = at::functionalization::impl::from_functional_tensor(exp_avgs);
      } else {
        exp_avgs_ = exp_avgs.vec();
      }
      
      ::std::vector<at::Tensor> exp_avg_sqs_;
      if (at::functionalization::impl::isFunctionalTensor(exp_avg_sqs)) {
        at::functionalization::impl::sync(exp_avg_sqs);
        exp_avg_sqs_ = at::functionalization::impl::from_functional_tensor(exp_avg_sqs);
      } else {
        exp_avg_sqs_ = exp_avg_sqs.vec();
      }
      
      ::std::vector<at::Tensor> max_exp_avg_sqs_;
      if (at::functionalization::impl::isFunctionalTensor(max_exp_avg_sqs)) {
        at::functionalization::impl::sync(max_exp_avg_sqs);
        max_exp_avg_sqs_ = at::functionalization::impl::from_functional_tensor(max_exp_avg_sqs);
      } else {
        max_exp_avg_sqs_ = max_exp_avg_sqs.vec();
      }
      
      ::std::vector<at::Tensor> state_steps_;
      if (at::functionalization::impl::isFunctionalTensor(state_steps)) {
        at::functionalization::impl::sync(state_steps);
        state_steps_ = at::functionalization::impl::from_functional_tensor(state_steps);
      } else {
        state_steps_ = state_steps.vec();
      }
      
      ::std::optional<at::Tensor> grad_scale_;
      if (at::functionalization::impl::isFunctionalTensor(grad_scale)) {
        at::functionalization::impl::sync(grad_scale);
        grad_scale_ = at::functionalization::impl::from_functional_tensor(grad_scale);
      } else {
        grad_scale_ = grad_scale;
      }
      
      ::std::optional<at::Tensor> found_inf_;
      if (at::functionalization::impl::isFunctionalTensor(found_inf)) {
        at::functionalization::impl::sync(found_inf);
        found_inf_ = at::functionalization::impl::from_functional_tensor(found_inf);
      } else {
        found_inf_ = found_inf;
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grads) && at::functionalization::impl::isFunctionalTensor(exp_avgs) && at::functionalization::impl::isFunctionalTensor(exp_avg_sqs) && at::functionalization::impl::isFunctionalTensor(max_exp_avg_sqs) && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(state_steps) || at::functionalization::impl::isFunctionalTensor(grad_scale) || at::functionalization::impl::isFunctionalTensor(found_inf))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_fused_adamw_out::call(self_, grads_, exp_avgs_, exp_avg_sqs_, max_exp_avg_sqs_, state_steps_, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_, found_inf_, out_);
         
        }
      } else {
        ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_fused_adamw::call(self_, grads_, exp_avgs_, exp_avg_sqs_, max_exp_avg_sqs_, state_steps_, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_, found_inf_);
        }
          auto grads_inner = at::functionalization::impl::from_functional_tensor(grads);
  at::functionalization::impl::replace_(grads, std::get<0>(tmp_output));
  at::functionalization::impl::commit_update(grads);
  at::functionalization::impl::sync(grads);
  auto grads_inner_updated = at::functionalization::impl::from_functional_tensor(grads);
  at::functionalization::impl::propagate_xla_data_direct(grads_inner, grads_inner_updated);
  auto exp_avgs_inner = at::functionalization::impl::from_functional_tensor(exp_avgs);
  at::functionalization::impl::replace_(exp_avgs, std::get<1>(tmp_output));
  at::functionalization::impl::commit_update(exp_avgs);
  at::functionalization::impl::sync(exp_avgs);
  auto exp_avgs_inner_updated = at::functionalization::impl::from_functional_tensor(exp_avgs);
  at::functionalization::impl::propagate_xla_data_direct(exp_avgs_inner, exp_avgs_inner_updated);
  auto exp_avg_sqs_inner = at::functionalization::impl::from_functional_tensor(exp_avg_sqs);
  at::functionalization::impl::replace_(exp_avg_sqs, std::get<2>(tmp_output));
  at::functionalization::impl::commit_update(exp_avg_sqs);
  at::functionalization::impl::sync(exp_avg_sqs);
  auto exp_avg_sqs_inner_updated = at::functionalization::impl::from_functional_tensor(exp_avg_sqs);
  at::functionalization::impl::propagate_xla_data_direct(exp_avg_sqs_inner, exp_avg_sqs_inner_updated);
  auto max_exp_avg_sqs_inner = at::functionalization::impl::from_functional_tensor(max_exp_avg_sqs);
  at::functionalization::impl::replace_(max_exp_avg_sqs, std::get<3>(tmp_output));
  at::functionalization::impl::commit_update(max_exp_avg_sqs);
  at::functionalization::impl::sync(max_exp_avg_sqs);
  auto max_exp_avg_sqs_inner_updated = at::functionalization::impl::from_functional_tensor(max_exp_avg_sqs);
  at::functionalization::impl::propagate_xla_data_direct(max_exp_avg_sqs_inner, max_exp_avg_sqs_inner_updated);
  auto out_inner = at::functionalization::impl::from_functional_tensor(out);
  at::functionalization::impl::replace_(out, std::get<4>(tmp_output));
  at::functionalization::impl::commit_update(out);
  at::functionalization::impl::sync(out);
  auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
  at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
    
      }
    }
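
    // The meta "reference run" at the top of these kernels is guarded by a
    // compile-time constant: the out= overloads (like _fused_adamw_out_out
    // above) use `if (false && !disable_meta_reference())`, so the meta replay
    // is skipped entirely, while the in-place overloads (like _fused_adamw_
    // below) use `if (true && ...)` and do replay the op on meta tensors first.
    // Within that block, AutoDispatchSkipFunctionalize prevents the meta call
    // from re-entering this functionalization kernel, and the
    // ExcludeDispatchKeyGuard masks the keys in exclude_keys_for_meta_dispatch
    // for the duration of the meta dispatch.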

    void _fused_adamw_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This helps us catch shape errors that apply to inplace ops but not to their functional variants.
        // (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
        auto self_meta = to_meta(self);
        auto grads_meta = to_meta(grads);
        auto exp_avgs_meta = to_meta(exp_avgs);
        auto exp_avg_sqs_meta = to_meta(exp_avg_sqs);
        auto max_exp_avg_sqs_meta = to_meta(max_exp_avg_sqs);
        auto state_steps_meta = to_meta(state_steps);
        auto grad_scale_meta = to_meta(grad_scale);
        auto found_inf_meta = to_meta(found_inf);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_fused_adamw_::call(self_meta, grads_meta, exp_avgs_meta, exp_avg_sqs_meta, max_exp_avg_sqs_meta, state_steps_meta, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_meta, found_inf_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> grads_;
      if (at::functionalization::impl::isFunctionalTensor(grads)) {
        at::functionalization::impl::sync(grads);
        grads_ = at::functionalization::impl::from_functional_tensor(grads);
      } else {
        grads_ = grads.vec();
      }
      
      ::std::vector<at::Tensor> exp_avgs_;
      if (at::functionalization::impl::isFunctionalTensor(exp_avgs)) {
        at::functionalization::impl::sync(exp_avgs);
        exp_avgs_ = at::functionalization::impl::from_functional_tensor(exp_avgs);
      } else {
        exp_avgs_ = exp_avgs.vec();
      }
      
      ::std::vector<at::Tensor> exp_avg_sqs_;
      if (at::functionalization::impl::isFunctionalTensor(exp_avg_sqs)) {
        at::functionalization::impl::sync(exp_avg_sqs);
        exp_avg_sqs_ = at::functionalization::impl::from_functional_tensor(exp_avg_sqs);
      } else {
        exp_avg_sqs_ = exp_avg_sqs.vec();
      }
      
      ::std::vector<at::Tensor> max_exp_avg_sqs_;
      if (at::functionalization::impl::isFunctionalTensor(max_exp_avg_sqs)) {
        at::functionalization::impl::sync(max_exp_avg_sqs);
        max_exp_avg_sqs_ = at::functionalization::impl::from_functional_tensor(max_exp_avg_sqs);
      } else {
        max_exp_avg_sqs_ = max_exp_avg_sqs.vec();
      }
      
      ::std::vector<at::Tensor> state_steps_;
      if (at::functionalization::impl::isFunctionalTensor(state_steps)) {
        at::functionalization::impl::sync(state_steps);
        state_steps_ = at::functionalization::impl::from_functional_tensor(state_steps);
      } else {
        state_steps_ = state_steps.vec();
      }
      
      ::std::optional<at::Tensor> grad_scale_;
      if (at::functionalization::impl::isFunctionalTensor(grad_scale)) {
        at::functionalization::impl::sync(grad_scale);
        grad_scale_ = at::functionalization::impl::from_functional_tensor(grad_scale);
      } else {
        grad_scale_ = grad_scale;
      }
      
      ::std::optional<at::Tensor> found_inf_;
      if (at::functionalization::impl::isFunctionalTensor(found_inf)) {
        at::functionalization::impl::sync(found_inf);
        found_inf_ = at::functionalization::impl::from_functional_tensor(found_inf);
      } else {
        found_inf_ = found_inf;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self) && at::functionalization::impl::isFunctionalTensor(grads) && at::functionalization::impl::isFunctionalTensor(exp_avgs) && at::functionalization::impl::isFunctionalTensor(exp_avg_sqs) && at::functionalization::impl::isFunctionalTensor(max_exp_avg_sqs))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(state_steps) || at::functionalization::impl::isFunctionalTensor(grad_scale) || at::functionalization::impl::isFunctionalTensor(found_inf))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_fused_adamw_::call(self_, grads_, exp_avgs_, exp_avg_sqs_, max_exp_avg_sqs_, state_steps_, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_, found_inf_);
         
        }
      } else {
        ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_fused_adamw::call(self_, grads_, exp_avgs_, exp_avg_sqs_, max_exp_avg_sqs_, state_steps_, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_, found_inf_);
        }
          auto self_inner = at::functionalization::impl::from_functional_tensor(self);
  at::functionalization::impl::replace_(self, std::get<0>(tmp_output));
  at::functionalization::impl::commit_update(self);
  at::functionalization::impl::sync(self);
  auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
  at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
  auto grads_inner = at::functionalization::impl::from_functional_tensor(grads);
  at::functionalization::impl::replace_(grads, std::get<1>(tmp_output));
  at::functionalization::impl::commit_update(grads);
  at::functionalization::impl::sync(grads);
  auto grads_inner_updated = at::functionalization::impl::from_functional_tensor(grads);
  at::functionalization::impl::propagate_xla_data_direct(grads_inner, grads_inner_updated);
  auto exp_avgs_inner = at::functionalization::impl::from_functional_tensor(exp_avgs);
  at::functionalization::impl::replace_(exp_avgs, std::get<2>(tmp_output));
  at::functionalization::impl::commit_update(exp_avgs);
  at::functionalization::impl::sync(exp_avgs);
  auto exp_avgs_inner_updated = at::functionalization::impl::from_functional_tensor(exp_avgs);
  at::functionalization::impl::propagate_xla_data_direct(exp_avgs_inner, exp_avgs_inner_updated);
  auto exp_avg_sqs_inner = at::functionalization::impl::from_functional_tensor(exp_avg_sqs);
  at::functionalization::impl::replace_(exp_avg_sqs, std::get<3>(tmp_output));
  at::functionalization::impl::commit_update(exp_avg_sqs);
  at::functionalization::impl::sync(exp_avg_sqs);
  auto exp_avg_sqs_inner_updated = at::functionalization::impl::from_functional_tensor(exp_avg_sqs);
  at::functionalization::impl::propagate_xla_data_direct(exp_avg_sqs_inner, exp_avg_sqs_inner_updated);
  auto max_exp_avg_sqs_inner = at::functionalization::impl::from_functional_tensor(max_exp_avg_sqs);
  at::functionalization::impl::replace_(max_exp_avg_sqs, std::get<4>(tmp_output));
  at::functionalization::impl::commit_update(max_exp_avg_sqs);
  at::functionalization::impl::sync(max_exp_avg_sqs);
  auto max_exp_avg_sqs_inner_updated = at::functionalization::impl::from_functional_tensor(max_exp_avg_sqs);
  at::functionalization::impl::propagate_xla_data_direct(max_exp_avg_sqs_inner, max_exp_avg_sqs_inner_updated);
    
      }
    }
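
    // The *_tensor_lr_* overloads that follow differ from the double-lr ones
    // only in that `lr` is an at::Tensor: it gets the same sync()/unwrap
    // treatment as the other tensor arguments, and the "mutating a
    // non-functional tensor" error is suppressed when lr lives on an XLA device
    // (the `lr.device().type() == c10::DeviceType::XLA` clause), for the same
    // reason the cpu_tensor.copy_(xla_tensor) comment gives.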

    void _fused_adamw_out_tensor_lr_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This helps us catch shape errors that apply to inplace ops but not to their functional variants.
        // (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
        auto self_meta = to_meta(self);
        auto grads_meta = to_meta(grads);
        auto exp_avgs_meta = to_meta(exp_avgs);
        auto exp_avg_sqs_meta = to_meta(exp_avg_sqs);
        auto max_exp_avg_sqs_meta = to_meta(max_exp_avg_sqs);
        auto state_steps_meta = to_meta(state_steps);
        auto lr_meta = to_meta(lr);
        auto grad_scale_meta = to_meta(grad_scale);
        auto found_inf_meta = to_meta(found_inf);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_fused_adamw_tensor_lr_out::call(self_meta, grads_meta, exp_avgs_meta, exp_avg_sqs_meta, max_exp_avg_sqs_meta, state_steps_meta, lr_meta, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_meta, found_inf_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> grads_;
      if (at::functionalization::impl::isFunctionalTensor(grads)) {
        at::functionalization::impl::sync(grads);
        grads_ = at::functionalization::impl::from_functional_tensor(grads);
      } else {
        grads_ = grads.vec();
      }
      
      ::std::vector<at::Tensor> exp_avgs_;
      if (at::functionalization::impl::isFunctionalTensor(exp_avgs)) {
        at::functionalization::impl::sync(exp_avgs);
        exp_avgs_ = at::functionalization::impl::from_functional_tensor(exp_avgs);
      } else {
        exp_avgs_ = exp_avgs.vec();
      }
      
      ::std::vector<at::Tensor> exp_avg_sqs_;
      if (at::functionalization::impl::isFunctionalTensor(exp_avg_sqs)) {
        at::functionalization::impl::sync(exp_avg_sqs);
        exp_avg_sqs_ = at::functionalization::impl::from_functional_tensor(exp_avg_sqs);
      } else {
        exp_avg_sqs_ = exp_avg_sqs.vec();
      }
      
      ::std::vector<at::Tensor> max_exp_avg_sqs_;
      if (at::functionalization::impl::isFunctionalTensor(max_exp_avg_sqs)) {
        at::functionalization::impl::sync(max_exp_avg_sqs);
        max_exp_avg_sqs_ = at::functionalization::impl::from_functional_tensor(max_exp_avg_sqs);
      } else {
        max_exp_avg_sqs_ = max_exp_avg_sqs.vec();
      }
      
      ::std::vector<at::Tensor> state_steps_;
      if (at::functionalization::impl::isFunctionalTensor(state_steps)) {
        at::functionalization::impl::sync(state_steps);
        state_steps_ = at::functionalization::impl::from_functional_tensor(state_steps);
      } else {
        state_steps_ = state_steps.vec();
      }
      
      at::Tensor lr_;
      if (at::functionalization::impl::isFunctionalTensor(lr)) {
        at::functionalization::impl::sync(lr);
        lr_ = at::functionalization::impl::from_functional_tensor(lr);
      } else {
        lr_ = lr;
      }
      
      ::std::optional<at::Tensor> grad_scale_;
      if (at::functionalization::impl::isFunctionalTensor(grad_scale)) {
        at::functionalization::impl::sync(grad_scale);
        grad_scale_ = at::functionalization::impl::from_functional_tensor(grad_scale);
      } else {
        grad_scale_ = grad_scale;
      }
      
      ::std::optional<at::Tensor> found_inf_;
      if (at::functionalization::impl::isFunctionalTensor(found_inf)) {
        at::functionalization::impl::sync(found_inf);
        found_inf_ = at::functionalization::impl::from_functional_tensor(found_inf);
      } else {
        found_inf_ = found_inf;
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grads) && at::functionalization::impl::isFunctionalTensor(exp_avgs) && at::functionalization::impl::isFunctionalTensor(exp_avg_sqs) && at::functionalization::impl::isFunctionalTensor(max_exp_avg_sqs) && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || lr.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(state_steps) || at::functionalization::impl::isFunctionalTensor(lr) || at::functionalization::impl::isFunctionalTensor(grad_scale) || at::functionalization::impl::isFunctionalTensor(found_inf))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_fused_adamw_tensor_lr_out::call(self_, grads_, exp_avgs_, exp_avg_sqs_, max_exp_avg_sqs_, state_steps_, lr_, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_, found_inf_, out_);
         
        }
      } else {
        ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_fused_adamw_tensor_lr::call(self_, grads_, exp_avgs_, exp_avg_sqs_, max_exp_avg_sqs_, state_steps_, lr_, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_, found_inf_);
        }
          auto grads_inner = at::functionalization::impl::from_functional_tensor(grads);
  at::functionalization::impl::replace_(grads, std::get<0>(tmp_output));
  at::functionalization::impl::commit_update(grads);
  at::functionalization::impl::sync(grads);
  auto grads_inner_updated = at::functionalization::impl::from_functional_tensor(grads);
  at::functionalization::impl::propagate_xla_data_direct(grads_inner, grads_inner_updated);
  auto exp_avgs_inner = at::functionalization::impl::from_functional_tensor(exp_avgs);
  at::functionalization::impl::replace_(exp_avgs, std::get<1>(tmp_output));
  at::functionalization::impl::commit_update(exp_avgs);
  at::functionalization::impl::sync(exp_avgs);
  auto exp_avgs_inner_updated = at::functionalization::impl::from_functional_tensor(exp_avgs);
  at::functionalization::impl::propagate_xla_data_direct(exp_avgs_inner, exp_avgs_inner_updated);
  auto exp_avg_sqs_inner = at::functionalization::impl::from_functional_tensor(exp_avg_sqs);
  at::functionalization::impl::replace_(exp_avg_sqs, std::get<2>(tmp_output));
  at::functionalization::impl::commit_update(exp_avg_sqs);
  at::functionalization::impl::sync(exp_avg_sqs);
  auto exp_avg_sqs_inner_updated = at::functionalization::impl::from_functional_tensor(exp_avg_sqs);
  at::functionalization::impl::propagate_xla_data_direct(exp_avg_sqs_inner, exp_avg_sqs_inner_updated);
  auto max_exp_avg_sqs_inner = at::functionalization::impl::from_functional_tensor(max_exp_avg_sqs);
  at::functionalization::impl::replace_(max_exp_avg_sqs, std::get<3>(tmp_output));
  at::functionalization::impl::commit_update(max_exp_avg_sqs);
  at::functionalization::impl::sync(max_exp_avg_sqs);
  auto max_exp_avg_sqs_inner_updated = at::functionalization::impl::from_functional_tensor(max_exp_avg_sqs);
  at::functionalization::impl::propagate_xla_data_direct(max_exp_avg_sqs_inner, max_exp_avg_sqs_inner_updated);
  auto out_inner = at::functionalization::impl::from_functional_tensor(out);
  at::functionalization::impl::replace_(out, std::get<4>(tmp_output));
  at::functionalization::impl::commit_update(out);
  at::functionalization::impl::sync(out);
  auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
  at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
    
      }
    }
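
    // Note the different write-back order in the two flavors: the out= overload
    // above leaves self untouched and commits std::get<0>..std::get<4> of the
    // functional result into grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs and
    // out, whereas the in-place overload below commits std::get<0>..std::get<4>
    // into self, grads, exp_avgs, exp_avg_sqs and max_exp_avg_sqs.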

    void _fused_adamw__tensor_lr(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This helps us catch shape errors that apply to inplace ops but not to their functional variants.
        // (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
        auto self_meta = to_meta(self);
        auto grads_meta = to_meta(grads);
        auto exp_avgs_meta = to_meta(exp_avgs);
        auto exp_avg_sqs_meta = to_meta(exp_avg_sqs);
        auto max_exp_avg_sqs_meta = to_meta(max_exp_avg_sqs);
        auto state_steps_meta = to_meta(state_steps);
        auto lr_meta = to_meta(lr);
        auto grad_scale_meta = to_meta(grad_scale);
        auto found_inf_meta = to_meta(found_inf);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_fused_adamw__tensor_lr::call(self_meta, grads_meta, exp_avgs_meta, exp_avg_sqs_meta, max_exp_avg_sqs_meta, state_steps_meta, lr_meta, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_meta, found_inf_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> grads_;
      if (at::functionalization::impl::isFunctionalTensor(grads)) {
        at::functionalization::impl::sync(grads);
        grads_ = at::functionalization::impl::from_functional_tensor(grads);
      } else {
        grads_ = grads.vec();
      }
      
      ::std::vector<at::Tensor> exp_avgs_;
      if (at::functionalization::impl::isFunctionalTensor(exp_avgs)) {
        at::functionalization::impl::sync(exp_avgs);
        exp_avgs_ = at::functionalization::impl::from_functional_tensor(exp_avgs);
      } else {
        exp_avgs_ = exp_avgs.vec();
      }
      
      ::std::vector<at::Tensor> exp_avg_sqs_;
      if (at::functionalization::impl::isFunctionalTensor(exp_avg_sqs)) {
        at::functionalization::impl::sync(exp_avg_sqs);
        exp_avg_sqs_ = at::functionalization::impl::from_functional_tensor(exp_avg_sqs);
      } else {
        exp_avg_sqs_ = exp_avg_sqs.vec();
      }
      
      ::std::vector<at::Tensor> max_exp_avg_sqs_;
      if (at::functionalization::impl::isFunctionalTensor(max_exp_avg_sqs)) {
        at::functionalization::impl::sync(max_exp_avg_sqs);
        max_exp_avg_sqs_ = at::functionalization::impl::from_functional_tensor(max_exp_avg_sqs);
      } else {
        max_exp_avg_sqs_ = max_exp_avg_sqs.vec();
      }
      
      ::std::vector<at::Tensor> state_steps_;
      if (at::functionalization::impl::isFunctionalTensor(state_steps)) {
        at::functionalization::impl::sync(state_steps);
        state_steps_ = at::functionalization::impl::from_functional_tensor(state_steps);
      } else {
        state_steps_ = state_steps.vec();
      }
      
      at::Tensor lr_;
      if (at::functionalization::impl::isFunctionalTensor(lr)) {
        at::functionalization::impl::sync(lr);
        lr_ = at::functionalization::impl::from_functional_tensor(lr);
      } else {
        lr_ = lr;
      }
      
      ::std::optional<at::Tensor> grad_scale_;
      if (at::functionalization::impl::isFunctionalTensor(grad_scale)) {
        at::functionalization::impl::sync(grad_scale);
        grad_scale_ = at::functionalization::impl::from_functional_tensor(grad_scale);
      } else {
        grad_scale_ = grad_scale;
      }
      
      ::std::optional<at::Tensor> found_inf_;
      if (at::functionalization::impl::isFunctionalTensor(found_inf)) {
        at::functionalization::impl::sync(found_inf);
        found_inf_ = at::functionalization::impl::from_functional_tensor(found_inf);
      } else {
        found_inf_ = found_inf;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self) && at::functionalization::impl::isFunctionalTensor(grads) && at::functionalization::impl::isFunctionalTensor(exp_avgs) && at::functionalization::impl::isFunctionalTensor(exp_avg_sqs) && at::functionalization::impl::isFunctionalTensor(max_exp_avg_sqs))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || lr.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(state_steps) || at::functionalization::impl::isFunctionalTensor(lr) || at::functionalization::impl::isFunctionalTensor(grad_scale) || at::functionalization::impl::isFunctionalTensor(found_inf))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_fused_adamw__tensor_lr::call(self_, grads_, exp_avgs_, exp_avg_sqs_, max_exp_avg_sqs_, state_steps_, lr_, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_, found_inf_);
         
        }
      } else {
        ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_fused_adamw_tensor_lr::call(self_, grads_, exp_avgs_, exp_avg_sqs_, max_exp_avg_sqs_, state_steps_, lr_, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_, found_inf_);
        }
          auto self_inner = at::functionalization::impl::from_functional_tensor(self);
  at::functionalization::impl::replace_(self, std::get<0>(tmp_output));
  at::functionalization::impl::commit_update(self);
  at::functionalization::impl::sync(self);
  auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
  at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
  auto grads_inner = at::functionalization::impl::from_functional_tensor(grads);
  at::functionalization::impl::replace_(grads, std::get<1>(tmp_output));
  at::functionalization::impl::commit_update(grads);
  at::functionalization::impl::sync(grads);
  auto grads_inner_updated = at::functionalization::impl::from_functional_tensor(grads);
  at::functionalization::impl::propagate_xla_data_direct(grads_inner, grads_inner_updated);
  auto exp_avgs_inner = at::functionalization::impl::from_functional_tensor(exp_avgs);
  at::functionalization::impl::replace_(exp_avgs, std::get<2>(tmp_output));
  at::functionalization::impl::commit_update(exp_avgs);
  at::functionalization::impl::sync(exp_avgs);
  auto exp_avgs_inner_updated = at::functionalization::impl::from_functional_tensor(exp_avgs);
  at::functionalization::impl::propagate_xla_data_direct(exp_avgs_inner, exp_avgs_inner_updated);
  auto exp_avg_sqs_inner = at::functionalization::impl::from_functional_tensor(exp_avg_sqs);
  at::functionalization::impl::replace_(exp_avg_sqs, std::get<3>(tmp_output));
  at::functionalization::impl::commit_update(exp_avg_sqs);
  at::functionalization::impl::sync(exp_avg_sqs);
  auto exp_avg_sqs_inner_updated = at::functionalization::impl::from_functional_tensor(exp_avg_sqs);
  at::functionalization::impl::propagate_xla_data_direct(exp_avg_sqs_inner, exp_avg_sqs_inner_updated);
  auto max_exp_avg_sqs_inner = at::functionalization::impl::from_functional_tensor(max_exp_avg_sqs);
  at::functionalization::impl::replace_(max_exp_avg_sqs, std::get<4>(tmp_output));
  at::functionalization::impl::commit_update(max_exp_avg_sqs);
  at::functionalization::impl::sync(max_exp_avg_sqs);
  auto max_exp_avg_sqs_inner_updated = at::functionalization::impl::from_functional_tensor(max_exp_avg_sqs);
  at::functionalization::impl::propagate_xla_data_direct(max_exp_avg_sqs_inner, max_exp_avg_sqs_inner_updated);
    
      }
    }
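
    // The _fused_sgd kernels below follow the same pattern with three mutated
    // tensor lists (self/grads/momentum_buffer_list for the in-place overloads,
    // grads/momentum_buffer_list/out for the out= ones), so the functional op
    // returns a three-element tuple; the scalar hyperparameters (weight_decay,
    // momentum, lr, dampening, ...) are forwarded unchanged.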

    void _fused_sgd_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This helps us catch shape errors that apply to inplace ops but not to their functional variants.
        // (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
        auto self_meta = to_meta(self);
        auto grads_meta = to_meta(grads);
        auto momentum_buffer_list_meta = to_meta(momentum_buffer_list);
        auto grad_scale_meta = to_meta(grad_scale);
        auto found_inf_meta = to_meta(found_inf);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_fused_sgd_out::call(self_meta, grads_meta, momentum_buffer_list_meta, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale_meta, found_inf_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> grads_;
      if (at::functionalization::impl::isFunctionalTensor(grads)) {
        at::functionalization::impl::sync(grads);
        grads_ = at::functionalization::impl::from_functional_tensor(grads);
      } else {
        grads_ = grads.vec();
      }
      
      ::std::vector<at::Tensor> momentum_buffer_list_;
      if (at::functionalization::impl::isFunctionalTensor(momentum_buffer_list)) {
        at::functionalization::impl::sync(momentum_buffer_list);
        momentum_buffer_list_ = at::functionalization::impl::from_functional_tensor(momentum_buffer_list);
      } else {
        momentum_buffer_list_ = momentum_buffer_list.vec();
      }
      
      ::std::optional<at::Tensor> grad_scale_;
      if (at::functionalization::impl::isFunctionalTensor(grad_scale)) {
        at::functionalization::impl::sync(grad_scale);
        grad_scale_ = at::functionalization::impl::from_functional_tensor(grad_scale);
      } else {
        grad_scale_ = grad_scale;
      }
      
      ::std::optional<at::Tensor> found_inf_;
      if (at::functionalization::impl::isFunctionalTensor(found_inf)) {
        at::functionalization::impl::sync(found_inf);
        found_inf_ = at::functionalization::impl::from_functional_tensor(found_inf);
      } else {
        found_inf_ = found_inf;
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grads) && at::functionalization::impl::isFunctionalTensor(momentum_buffer_list) && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(grad_scale) || at::functionalization::impl::isFunctionalTensor(found_inf))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_fused_sgd_out::call(self_, grads_, momentum_buffer_list_, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale_, found_inf_, out_);
         
        }
      } else {
        ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_fused_sgd::call(self_, grads_, momentum_buffer_list_, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale_, found_inf_);
        }
          auto grads_inner = at::functionalization::impl::from_functional_tensor(grads);
  at::functionalization::impl::replace_(grads, std::get<0>(tmp_output));
  at::functionalization::impl::commit_update(grads);
  at::functionalization::impl::sync(grads);
  auto grads_inner_updated = at::functionalization::impl::from_functional_tensor(grads);
  at::functionalization::impl::propagate_xla_data_direct(grads_inner, grads_inner_updated);
  auto momentum_buffer_list_inner = at::functionalization::impl::from_functional_tensor(momentum_buffer_list);
  at::functionalization::impl::replace_(momentum_buffer_list, std::get<1>(tmp_output));
  at::functionalization::impl::commit_update(momentum_buffer_list);
  at::functionalization::impl::sync(momentum_buffer_list);
  auto momentum_buffer_list_inner_updated = at::functionalization::impl::from_functional_tensor(momentum_buffer_list);
  at::functionalization::impl::propagate_xla_data_direct(momentum_buffer_list_inner, momentum_buffer_list_inner_updated);
  auto out_inner = at::functionalization::impl::from_functional_tensor(out);
  at::functionalization::impl::replace_(out, std::get<2>(tmp_output));
  at::functionalization::impl::commit_update(out);
  at::functionalization::impl::sync(out);
  auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
  at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
    
      }
    }
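
    // Illustrative sketch only (not part of the generated file): roughly how the
    // in-place kernel below gets exercised. It assumes
    // at::functionalization::impl::to_functional_tensor() from
    // FunctionalTensorWrapper.h and that dispatch on the wrapped tensors routes
    // through the Functionalize key; treat it as a sketch, not an exact recipe.
    //
    //   at::Tensor p = at::functionalization::impl::to_functional_tensor(at::randn({4}));
    //   at::Tensor g = at::functionalization::impl::to_functional_tensor(at::randn({4}));
    //   at::Tensor m = at::functionalization::impl::to_functional_tensor(at::zeros({4}));
    //   // With functionalization active, this lands in _fused_sgd_ below, which
    //   // redispatches to the functional at::_ops::_fused_sgd::call(...) and then
    //   // commits the results back into p, g and m.
    //   at::_ops::_fused_sgd_::call({p}, {g}, {m},
    //       /*weight_decay=*/0.0, /*momentum=*/0.9, /*lr=*/0.1, /*dampening=*/0.0,
    //       /*nesterov=*/false, /*maximize=*/false, /*is_first_step=*/true,
    //       /*grad_scale=*/::std::nullopt, /*found_inf=*/::std::nullopt);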

    void _fused_sgd_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This helps us catch shape errors that apply to inplace ops but not to their functional variants.
        // (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
        auto self_meta = to_meta(self);
        auto grads_meta = to_meta(grads);
        auto momentum_buffer_list_meta = to_meta(momentum_buffer_list);
        auto grad_scale_meta = to_meta(grad_scale);
        auto found_inf_meta = to_meta(found_inf);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_fused_sgd_::call(self_meta, grads_meta, momentum_buffer_list_meta, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale_meta, found_inf_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> grads_;
      if (at::functionalization::impl::isFunctionalTensor(grads)) {
        at::functionalization::impl::sync(grads);
        grads_ = at::functionalization::impl::from_functional_tensor(grads);
      } else {
        grads_ = grads.vec();
      }
      
      ::std::vector<at::Tensor> momentum_buffer_list_;
      if (at::functionalization::impl::isFunctionalTensor(momentum_buffer_list)) {
        at::functionalization::impl::sync(momentum_buffer_list);
        momentum_buffer_list_ = at::functionalization::impl::from_functional_tensor(momentum_buffer_list);
      } else {
        momentum_buffer_list_ = momentum_buffer_list.vec();
      }
      
      ::std::optional<at::Tensor> grad_scale_;
      if (at::functionalization::impl::isFunctionalTensor(grad_scale)) {
        at::functionalization::impl::sync(grad_scale);
        grad_scale_ = at::functionalization::impl::from_functional_tensor(grad_scale);
      } else {
        grad_scale_ = grad_scale;
      }
      
      ::std::optional<at::Tensor> found_inf_;
      if (at::functionalization::impl::isFunctionalTensor(found_inf)) {
        at::functionalization::impl::sync(found_inf);
        found_inf_ = at::functionalization::impl::from_functional_tensor(found_inf);
      } else {
        found_inf_ = found_inf;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self) && at::functionalization::impl::isFunctionalTensor(grads) && at::functionalization::impl::isFunctionalTensor(momentum_buffer_list))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(grad_scale) || at::functionalization::impl::isFunctionalTensor(found_inf))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_fused_sgd_::call(self_, grads_, momentum_buffer_list_, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale_, found_inf_);
         
        }
      } else {
        ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_fused_sgd::call(self_, grads_, momentum_buffer_list_, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale_, found_inf_);
        }
          auto self_inner = at::functionalization::impl::from_functional_tensor(self);
  at::functionalization::impl::replace_(self, std::get<0>(tmp_output));
  at::functionalization::impl::commit_update(self);
  at::functionalization::impl::sync(self);
  auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
  at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
  auto grads_inner = at::functionalization::impl::from_functional_tensor(grads);
  at::functionalization::impl::replace_(grads, std::get<1>(tmp_output));
  at::functionalization::impl::commit_update(grads);
  at::functionalization::impl::sync(grads);
  auto grads_inner_updated = at::functionalization::impl::from_functional_tensor(grads);
  at::functionalization::impl::propagate_xla_data_direct(grads_inner, grads_inner_updated);
  auto momentum_buffer_list_inner = at::functionalization::impl::from_functional_tensor(momentum_buffer_list);
  at::functionalization::impl::replace_(momentum_buffer_list, std::get<2>(tmp_output));
  at::functionalization::impl::commit_update(momentum_buffer_list);
  at::functionalization::impl::sync(momentum_buffer_list);
  auto momentum_buffer_list_inner_updated = at::functionalization::impl::from_functional_tensor(momentum_buffer_list);
  at::functionalization::impl::propagate_xla_data_direct(momentum_buffer_list_inner, momentum_buffer_list_inner_updated);
    
      }
    }
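
    // grad_scale and found_inf are ::std::optional<at::Tensor> inputs that these
    // kernels treat as read-only: they are synced and unwrapped like the other
    // arguments and participate in the "would mutate a non-functional tensor"
    // check, but they are never written back in the commit block.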

    void _fused_sgd_out_tensor_lr_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This helps us catch shape errors that apply to inplace ops but not to their functional variants.
        // (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
        auto self_meta = to_meta(self);
        auto grads_meta = to_meta(grads);
        auto momentum_buffer_list_meta = to_meta(momentum_buffer_list);
        auto lr_meta = to_meta(lr);
        auto grad_scale_meta = to_meta(grad_scale);
        auto found_inf_meta = to_meta(found_inf);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_fused_sgd_tensor_lr_out::call(self_meta, grads_meta, momentum_buffer_list_meta, weight_decay, momentum, lr_meta, dampening, nesterov, maximize, is_first_step, grad_scale_meta, found_inf_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> grads_;
      if (at::functionalization::impl::isFunctionalTensor(grads)) {
        at::functionalization::impl::sync(grads);
        grads_ = at::functionalization::impl::from_functional_tensor(grads);
      } else {
        grads_ = grads.vec();
      }
      
      ::std::vector<at::Tensor> momentum_buffer_list_;
      if (at::functionalization::impl::isFunctionalTensor(momentum_buffer_list)) {
        at::functionalization::impl::sync(momentum_buffer_list);
        momentum_buffer_list_ = at::functionalization::impl::from_functional_tensor(momentum_buffer_list);
      } else {
        momentum_buffer_list_ = momentum_buffer_list.vec();
      }
      
      at::Tensor lr_;
      if (at::functionalization::impl::isFunctionalTensor(lr)) {
        at::functionalization::impl::sync(lr);
        lr_ = at::functionalization::impl::from_functional_tensor(lr);
      } else {
        lr_ = lr;
      }
      
      ::std::optional<at::Tensor> grad_scale_;
      if (at::functionalization::impl::isFunctionalTensor(grad_scale)) {
        at::functionalization::impl::sync(grad_scale);
        grad_scale_ = at::functionalization::impl::from_functional_tensor(grad_scale);
      } else {
        grad_scale_ = grad_scale;
      }
      
      ::std::optional<at::Tensor> found_inf_;
      if (at::functionalization::impl::isFunctionalTensor(found_inf)) {
        at::functionalization::impl::sync(found_inf);
        found_inf_ = at::functionalization::impl::from_functional_tensor(found_inf);
      } else {
        found_inf_ = found_inf;
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grads) && at::functionalization::impl::isFunctionalTensor(momentum_buffer_list) && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || lr.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(lr) || at::functionalization::impl::isFunctionalTensor(grad_scale) || at::functionalization::impl::isFunctionalTensor(found_inf))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_fused_sgd_tensor_lr_out::call(self_, grads_, momentum_buffer_list_, weight_decay, momentum, lr_, dampening, nesterov, maximize, is_first_step, grad_scale_, found_inf_, out_);
         
        }
      } else {
        ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_fused_sgd_tensor_lr::call(self_, grads_, momentum_buffer_list_, weight_decay, momentum, lr_, dampening, nesterov, maximize, is_first_step, grad_scale_, found_inf_);
        }
          auto grads_inner = at::functionalization::impl::from_functional_tensor(grads);
  at::functionalization::impl::replace_(grads, std::get<0>(tmp_output));
  at::functionalization::impl::commit_update(grads);
  at::functionalization::impl::sync(grads);
  auto grads_inner_updated = at::functionalization::impl::from_functional_tensor(grads);
  at::functionalization::impl::propagate_xla_data_direct(grads_inner, grads_inner_updated);
  auto momentum_buffer_list_inner = at::functionalization::impl::from_functional_tensor(momentum_buffer_list);
  at::functionalization::impl::replace_(momentum_buffer_list, std::get<1>(tmp_output));
  at::functionalization::impl::commit_update(momentum_buffer_list);
  at::functionalization::impl::sync(momentum_buffer_list);
  auto momentum_buffer_list_inner_updated = at::functionalization::impl::from_functional_tensor(momentum_buffer_list);
  at::functionalization::impl::propagate_xla_data_direct(momentum_buffer_list_inner, momentum_buffer_list_inner_updated);
  auto out_inner = at::functionalization::impl::from_functional_tensor(out);
  at::functionalization::impl::replace_(out, std::get<2>(tmp_output));
  at::functionalization::impl::commit_update(out);
  at::functionalization::impl::sync(out);
  auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
  at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
    
      }
    }

    void _fused_sgd__tensor_lr(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This helps us catch shape errors that apply to inplace ops but not to their functional variants.
        // (We can only do this for inplace ops today, though, because they technically all support meta tensors.)
        auto self_meta = to_meta(self);
        auto grads_meta = to_meta(grads);
        auto momentum_buffer_list_meta = to_meta(momentum_buffer_list);
        auto lr_meta = to_meta(lr);
        auto grad_scale_meta = to_meta(grad_scale);
        auto found_inf_meta = to_meta(found_inf);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_fused_sgd__tensor_lr::call(self_meta, grads_meta, momentum_buffer_list_meta, weight_decay, momentum, lr_meta, dampening, nesterov, maximize, is_first_step, grad_scale_meta, found_inf_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> grads_;
      if (at::functionalization::impl::isFunctionalTensor(grads)) {
        at::functionalization::impl::sync(grads);
        grads_ = at::functionalization::impl::from_functional_tensor(grads);
      } else {
        grads_ = grads.vec();
      }
      
      ::std::vector<at::Tensor> momentum_buffer_list_;
      if (at::functionalization::impl::isFunctionalTensor(momentum_buffer_list)) {
        at::functionalization::impl::sync(momentum_buffer_list);
        momentum_buffer_list_ = at::functionalization::impl::from_functional_tensor(momentum_buffer_list);
      } else {
        momentum_buffer_list_ = momentum_buffer_list.vec();
      }
      
      at::Tensor lr_;
      if (at::functionalization::impl::isFunctionalTensor(lr)) {
        at::functionalization::impl::sync(lr);
        lr_ = at::functionalization::impl::from_functional_tensor(lr);
      } else {
        lr_ = lr;
      }
      
      ::std::optional<at::Tensor> grad_scale_;
      if (at::functionalization::impl::isFunctionalTensor(grad_scale)) {
        at::functionalization::impl::sync(grad_scale);
        grad_scale_ = at::functionalization::impl::from_functional_tensor(grad_scale);
      } else {
        grad_scale_ = grad_scale;
      }
      
      ::std::optional<at::Tensor> found_inf_;
      if (at::functionalization::impl::isFunctionalTensor(found_inf)) {
        at::functionalization::impl::sync(found_inf);
        found_inf_ = at::functionalization::impl::from_functional_tensor(found_inf);
      } else {
        found_inf_ = found_inf;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self) && at::functionalization::impl::isFunctionalTensor(grads) && at::functionalization::impl::isFunctionalTensor(momentum_buffer_list))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false || lr.device().type() == c10::DeviceType::XLA) && (false || at::functionalization::impl::isFunctionalTensor(lr) || at::functionalization::impl::isFunctionalTensor(grad_scale) || at::functionalization::impl::isFunctionalTensor(found_inf))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_fused_sgd__tensor_lr::call(self_, grads_, momentum_buffer_list_, weight_decay, momentum, lr_, dampening, nesterov, maximize, is_first_step, grad_scale_, found_inf_);
         
        }
      } else {
        ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_fused_sgd_tensor_lr::call(self_, grads_, momentum_buffer_list_, weight_decay, momentum, lr_, dampening, nesterov, maximize, is_first_step, grad_scale_, found_inf_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        auto grads_inner = at::functionalization::impl::from_functional_tensor(grads);
        at::functionalization::impl::replace_(grads, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(grads);
        at::functionalization::impl::sync(grads);
        auto grads_inner_updated = at::functionalization::impl::from_functional_tensor(grads);
        at::functionalization::impl::propagate_xla_data_direct(grads_inner, grads_inner_updated);
        auto momentum_buffer_list_inner = at::functionalization::impl::from_functional_tensor(momentum_buffer_list);
        at::functionalization::impl::replace_(momentum_buffer_list, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(momentum_buffer_list);
        at::functionalization::impl::sync(momentum_buffer_list);
        auto momentum_buffer_list_inner_updated = at::functionalization::impl::from_functional_tensor(momentum_buffer_list);
        at::functionalization::impl::propagate_xla_data_direct(momentum_buffer_list_inner, momentum_buffer_list_inner_updated);
      }
    }

    void _fused_adagrad_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList state_sums, at::TensorList state_steps, double lr, double lr_decay, double weight_decay, double eps, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out) {
      if (false && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto grads_meta = to_meta(grads);
        auto state_sums_meta = to_meta(state_sums);
        auto state_steps_meta = to_meta(state_steps);
        auto grad_scale_meta = to_meta(grad_scale);
        auto found_inf_meta = to_meta(found_inf);
        auto out_meta = to_meta(out);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_fused_adagrad_out::call(self_meta, grads_meta, state_sums_meta, state_steps_meta, lr, lr_decay, weight_decay, eps, maximize, grad_scale_meta, found_inf_meta, out_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> grads_;
      if (at::functionalization::impl::isFunctionalTensor(grads)) {
        at::functionalization::impl::sync(grads);
        grads_ = at::functionalization::impl::from_functional_tensor(grads);
      } else {
        grads_ = grads.vec();
      }
      
      ::std::vector<at::Tensor> state_sums_;
      if (at::functionalization::impl::isFunctionalTensor(state_sums)) {
        at::functionalization::impl::sync(state_sums);
        state_sums_ = at::functionalization::impl::from_functional_tensor(state_sums);
      } else {
        state_sums_ = state_sums.vec();
      }
      
      ::std::vector<at::Tensor> state_steps_;
      if (at::functionalization::impl::isFunctionalTensor(state_steps)) {
        at::functionalization::impl::sync(state_steps);
        state_steps_ = at::functionalization::impl::from_functional_tensor(state_steps);
      } else {
        state_steps_ = state_steps.vec();
      }
      
      ::std::optional<at::Tensor> grad_scale_;
      if (at::functionalization::impl::isFunctionalTensor(grad_scale)) {
        at::functionalization::impl::sync(grad_scale);
        grad_scale_ = at::functionalization::impl::from_functional_tensor(grad_scale);
      } else {
        grad_scale_ = grad_scale;
      }
      
      ::std::optional<at::Tensor> found_inf_;
      if (at::functionalization::impl::isFunctionalTensor(found_inf)) {
        at::functionalization::impl::sync(found_inf);
        found_inf_ = at::functionalization::impl::from_functional_tensor(found_inf);
      } else {
        found_inf_ = found_inf;
      }
      
      ::std::vector<at::Tensor> out_;
      if (at::functionalization::impl::isFunctionalTensor(out)) {
        at::functionalization::impl::sync(out);
        out_ = at::functionalization::impl::from_functional_tensor(out);
      } else {
        out_ = out.vec();
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(grads) && at::functionalization::impl::isFunctionalTensor(state_sums) && at::functionalization::impl::isFunctionalTensor(state_steps) && at::functionalization::impl::isFunctionalTensor(out))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(grad_scale) || at::functionalization::impl::isFunctionalTensor(found_inf))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_fused_adagrad_out::call(self_, grads_, state_sums_, state_steps_, lr, lr_decay, weight_decay, eps, maximize, grad_scale_, found_inf_, out_);
         
        }
      } else {
        ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_fused_adagrad::call(self_, grads_, state_sums_, state_steps_, lr, lr_decay, weight_decay, eps, maximize, grad_scale_, found_inf_);
        }
        auto grads_inner = at::functionalization::impl::from_functional_tensor(grads);
        at::functionalization::impl::replace_(grads, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(grads);
        at::functionalization::impl::sync(grads);
        auto grads_inner_updated = at::functionalization::impl::from_functional_tensor(grads);
        at::functionalization::impl::propagate_xla_data_direct(grads_inner, grads_inner_updated);
        auto state_sums_inner = at::functionalization::impl::from_functional_tensor(state_sums);
        at::functionalization::impl::replace_(state_sums, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(state_sums);
        at::functionalization::impl::sync(state_sums);
        auto state_sums_inner_updated = at::functionalization::impl::from_functional_tensor(state_sums);
        at::functionalization::impl::propagate_xla_data_direct(state_sums_inner, state_sums_inner_updated);
        auto state_steps_inner = at::functionalization::impl::from_functional_tensor(state_steps);
        at::functionalization::impl::replace_(state_steps, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(state_steps);
        at::functionalization::impl::sync(state_steps);
        auto state_steps_inner_updated = at::functionalization::impl::from_functional_tensor(state_steps);
        at::functionalization::impl::propagate_xla_data_direct(state_steps_inner, state_steps_inner_updated);
        auto out_inner = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::replace_(out, std::get<3>(tmp_output));
        at::functionalization::impl::commit_update(out);
        at::functionalization::impl::sync(out);
        auto out_inner_updated = at::functionalization::impl::from_functional_tensor(out);
        at::functionalization::impl::propagate_xla_data_direct(out_inner, out_inner_updated);
      }
    }

    void _fused_adagrad_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList state_sums, at::TensorList state_steps, double lr, double lr_decay, double weight_decay, double eps, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
      if (true && !disable_meta_reference()) {
        // Before converting the mutable op to its functional variant, run meta tensors through the original op.
        // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants.
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
        auto self_meta = to_meta(self);
        auto grads_meta = to_meta(grads);
        auto state_sums_meta = to_meta(state_sums);
        auto state_steps_meta = to_meta(state_steps);
        auto grad_scale_meta = to_meta(grad_scale);
        auto found_inf_meta = to_meta(found_inf);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        at::_ops::_fused_adagrad_::call(self_meta, grads_meta, state_sums_meta, state_steps_meta, lr, lr_decay, weight_decay, eps, maximize, grad_scale_meta, found_inf_meta);
      }
      
      ::std::vector<at::Tensor> self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        at::functionalization::impl::sync(self);
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self.vec();
      }
      
      ::std::vector<at::Tensor> grads_;
      if (at::functionalization::impl::isFunctionalTensor(grads)) {
        at::functionalization::impl::sync(grads);
        grads_ = at::functionalization::impl::from_functional_tensor(grads);
      } else {
        grads_ = grads.vec();
      }
      
      ::std::vector<at::Tensor> state_sums_;
      if (at::functionalization::impl::isFunctionalTensor(state_sums)) {
        at::functionalization::impl::sync(state_sums);
        state_sums_ = at::functionalization::impl::from_functional_tensor(state_sums);
      } else {
        state_sums_ = state_sums.vec();
      }
      
      ::std::vector<at::Tensor> state_steps_;
      if (at::functionalization::impl::isFunctionalTensor(state_steps)) {
        at::functionalization::impl::sync(state_steps);
        state_steps_ = at::functionalization::impl::from_functional_tensor(state_steps);
      } else {
        state_steps_ = state_steps.vec();
      }
      
      ::std::optional<at::Tensor> grad_scale_;
      if (at::functionalization::impl::isFunctionalTensor(grad_scale)) {
        at::functionalization::impl::sync(grad_scale);
        grad_scale_ = at::functionalization::impl::from_functional_tensor(grad_scale);
      } else {
        grad_scale_ = grad_scale;
      }
      
      ::std::optional<at::Tensor> found_inf_;
      if (at::functionalization::impl::isFunctionalTensor(found_inf)) {
        at::functionalization::impl::sync(found_inf);
        found_inf_ = at::functionalization::impl::from_functional_tensor(found_inf);
      } else {
        found_inf_ = found_inf;
      }
      if (!(true && at::functionalization::impl::isFunctionalTensor(self) && at::functionalization::impl::isFunctionalTensor(grads) && at::functionalization::impl::isFunctionalTensor(state_sums) && at::functionalization::impl::isFunctionalTensor(state_steps))) {
        // We want to disable this check if there are any XLA tensors.
        // cpu_tensor.copy_(xla_tensor) is valid code.
        if (!(false) && (false || at::functionalization::impl::isFunctionalTensor(grad_scale) || at::functionalization::impl::isFunctionalTensor(found_inf))) {
         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
         TORCH_INTERNAL_ASSERT(false,
           "mutating a non-functional tensor with a functional tensor is not allowed.",
           " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
        } else {
         // case 2: arguments are not functional tensors, so we no-op and redispatch.
         at::AutoDispatchSkipFunctionalize guard;
         at::_ops::_fused_adagrad_::call(self_, grads_, state_sums_, state_steps_, lr, lr_decay, weight_decay, eps, maximize, grad_scale_, found_inf_);
         
        }
      } else {
        ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> tmp_output;
        {
          at::AutoDispatchSkipFunctionalize guard;
          tmp_output = at::_ops::_fused_adagrad::call(self_, grads_, state_sums_, state_steps_, lr, lr_decay, weight_decay, eps, maximize, grad_scale_, found_inf_);
        }
        auto self_inner = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::replace_(self, std::get<0>(tmp_output));
        at::functionalization::impl::commit_update(self);
        at::functionalization::impl::sync(self);
        auto self_inner_updated = at::functionalization::impl::from_functional_tensor(self);
        at::functionalization::impl::propagate_xla_data_direct(self_inner, self_inner_updated);
        auto grads_inner = at::functionalization::impl::from_functional_tensor(grads);
        at::functionalization::impl::replace_(grads, std::get<1>(tmp_output));
        at::functionalization::impl::commit_update(grads);
        at::functionalization::impl::sync(grads);
        auto grads_inner_updated = at::functionalization::impl::from_functional_tensor(grads);
        at::functionalization::impl::propagate_xla_data_direct(grads_inner, grads_inner_updated);
        auto state_sums_inner = at::functionalization::impl::from_functional_tensor(state_sums);
        at::functionalization::impl::replace_(state_sums, std::get<2>(tmp_output));
        at::functionalization::impl::commit_update(state_sums);
        at::functionalization::impl::sync(state_sums);
        auto state_sums_inner_updated = at::functionalization::impl::from_functional_tensor(state_sums);
        at::functionalization::impl::propagate_xla_data_direct(state_sums_inner, state_sums_inner_updated);
        auto state_steps_inner = at::functionalization::impl::from_functional_tensor(state_steps);
        at::functionalization::impl::replace_(state_steps, std::get<3>(tmp_output));
        at::functionalization::impl::commit_update(state_steps);
        at::functionalization::impl::sync(state_steps);
        auto state_steps_inner_updated = at::functionalization::impl::from_functional_tensor(state_steps);
        at::functionalization::impl::propagate_xla_data_direct(state_steps_inner, state_steps_inner_updated);
      }
    }

    at::Tensor _fw_primal(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t level) {
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::_fw_primal::call(self_, level);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
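      // reapply_views is a TLS flag: when true, replaying this op uses the real (aliasing)
      // view variant; when false, the *_copy variant is used instead. The matching
      // inverse_return_mode tells the functional inverse whether returning a view (or a
      // scatter-style inverse) is acceptable, or whether it must never return a view.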
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::_fw_primal::call(self_meta, level);
      }
      at::Tensor tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {
          tmp_output = at::_ops::_fw_primal::call(self_, level);
        } else {
          tmp_output = at::_ops::_fw_primal_copy::call(self_, level);
        }
      }
      
      bool has_symbolic_inputs = false;
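      // The ViewMeta below pairs a forward lambda (re-apply this view off a possibly-updated
      // base) with a reverse lambda (map a mutated view back onto the base via the
      // corresponding FunctionalInverses entry), which is what lets later mutations through
      // the view be replayed onto the original tensor.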
      
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views, level = level](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::_fw_primal::call(base, level);
          } else {
            return at::_ops::_fw_primal_copy::call(base, level);
          }
        },
        [inverse_return_mode = inverse_return_mode, level = level](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::_fw_primal_inverse(base, mutated_view, inverse_return_mode, level);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs,
        /*is_multi_output=*/false,
        /*is_as_strided=*/false
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
      // See Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }
      return out;
    }

    at::Tensor _make_dual(c10::DispatchKeySet dispatchKeySet, const at::Tensor & primal, const at::Tensor & tangent, int64_t level) {
      
      at::Tensor primal_;
      if (at::functionalization::impl::isFunctionalTensor(primal)) {
        
        primal_ = at::functionalization::impl::from_functional_tensor(primal);
      } else {
        primal_ = primal;
      }
      
      at::Tensor tangent_;
      if (at::functionalization::impl::isFunctionalTensor(tangent)) {
        
        tangent_ = at::functionalization::impl::from_functional_tensor(tangent);
      } else {
        tangent_ = tangent;
      }
      if (!at::functionalization::impl::isFunctionalTensor(primal)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::_make_dual::call(primal_, tangent_, level);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      auto compute_reference_meta =
        primal.key_set().has_backend(c10::BackendComponent::XLABit) ||
        primal.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto primal_meta = to_meta(primal);
        auto tangent_meta = to_meta(tangent);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::_make_dual::call(primal_meta, tangent_meta, level);
      }
      at::Tensor tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {
          tmp_output = at::_ops::_make_dual::call(primal_, tangent_, level);
        } else {
          tmp_output = at::_ops::_make_dual_copy::call(primal_, tangent_, level);
        }
      }
      
      bool has_symbolic_inputs = false;
      
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views, tangent = tangent, level = level](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::_make_dual::call(base, tangent, level);
          } else {
            return at::_ops::_make_dual_copy::call(base, tangent, level);
          }
        },
        [inverse_return_mode = inverse_return_mode, tangent = tangent, level = level](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::_make_dual_inverse(base, mutated_view, inverse_return_mode, tangent, level);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs,
        /*is_multi_output=*/false,
        /*is_as_strided=*/false
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, primal, view_meta);
      // See Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }
      return out;
    }

    at::Tensor view_as_real(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::view_as_real::call(self_);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::view_as_real::call(self_meta);
      }
      at::Tensor tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {
          tmp_output = at::_ops::view_as_real::call(self_);
        } else {
          tmp_output = at::_ops::view_as_real_copy::call(self_);
        }
      }
      
      bool has_symbolic_inputs = false;
      
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::view_as_real::call(base);
          } else {
            return at::_ops::view_as_real_copy::call(base);
          }
        },
        [inverse_return_mode = inverse_return_mode](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::view_as_real_inverse(base, mutated_view, inverse_return_mode);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs,
        /*is_multi_output=*/false,
        /*is_as_strided=*/false
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
      // See Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }
      return out;
    }

    at::Tensor view_as_complex(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::view_as_complex::call(self_);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::view_as_complex::call(self_meta);
      }
      at::Tensor tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {
          tmp_output = at::_ops::view_as_complex::call(self_);
        } else {
          tmp_output = at::_ops::view_as_complex_copy::call(self_);
        }
      }
      
      bool has_symbolic_inputs = false;
      
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::view_as_complex::call(base);
          } else {
            return at::_ops::view_as_complex_copy::call(base);
          }
        },
        [inverse_return_mode = inverse_return_mode](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::view_as_complex_inverse(base, mutated_view, inverse_return_mode);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs,
        /*is_multi_output=*/false,
        /*is_as_strided=*/false
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
      // See Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }
      return out;
    }

    at::Tensor _conj(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::_conj::call(self_);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::_conj::call(self_meta);
      }
      at::Tensor tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {
          tmp_output = at::_ops::_conj::call(self_);
        } else {
          tmp_output = at::_ops::_conj_copy::call(self_);
        }
      }
      
      bool has_symbolic_inputs = false;
      
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::_conj::call(base);
          } else {
            return at::_ops::_conj_copy::call(base);
          }
        },
        [inverse_return_mode = inverse_return_mode](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::_conj_inverse(base, mutated_view, inverse_return_mode);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs,
        /*is_multi_output=*/false,
        /*is_as_strided=*/false
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
      // See Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }
      return out;
    }

    at::Tensor _neg_view(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::_neg_view::call(self_);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::_neg_view::call(self_meta);
      }
      at::Tensor tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {
          tmp_output = at::_ops::_neg_view::call(self_);
        } else {
          tmp_output = at::_ops::_neg_view_copy::call(self_);
        }
      }
      
      bool has_symbolic_inputs = false;
      
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::_neg_view::call(base);
          } else {
            return at::_ops::_neg_view_copy::call(base);
          }
        },
        [inverse_return_mode = inverse_return_mode](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::_neg_view_inverse(base, mutated_view, inverse_return_mode);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs,
        /*is_multi_output=*/false,
        /*is_as_strided=*/false
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
      // See Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }
      return out;
    }

    at::Tensor as_strided(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<c10::SymInt> storage_offset) {
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::as_strided::call(self_, size, stride, storage_offset);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::as_strided::call(self_meta, size, stride, storage_offset);
      }
      at::Tensor tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {
          tmp_output = at::_ops::as_strided::call(self_, size, stride, storage_offset);
        } else {
          tmp_output = at::_ops::as_strided_copy::call(self_, size, stride, storage_offset);
        }
      }
      
      bool has_symbolic_inputs = false;
      has_symbolic_inputs = has_symbolic_inputs | (std::any_of(size.begin(), size.end(), [=](auto& arg) { return arg.is_symbolic(); }));
      has_symbolic_inputs = has_symbolic_inputs | (std::any_of(stride.begin(), stride.end(), [=](auto& arg) { return arg.is_symbolic(); }));
      has_symbolic_inputs = has_symbolic_inputs | (storage_offset.has_value() ? (*storage_offset).is_symbolic() : false);
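      // size/stride/storage_offset are SymInts here; if any of them is actually symbolic we
      // record that, so the ViewMeta below knows this view was created from symbolic inputs.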
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views, size = size.vec(), stride = stride.vec(), storage_offset = storage_offset](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::as_strided::call(base, size, stride, storage_offset);
          } else {
            return at::_ops::as_strided_copy::call(base, size, stride, storage_offset);
          }
        },
        [inverse_return_mode = inverse_return_mode, size = size.vec(), stride = stride.vec(), storage_offset = storage_offset](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::as_strided_inverse(base, mutated_view, inverse_return_mode, size, stride, storage_offset);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs,
        /*is_multi_output=*/false,
        /*is_as_strided=*/true
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
      // See Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }
      return out;
    }

    const at::Tensor & as_strided_(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<c10::SymInt> storage_offset) {
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        
        at::Tensor self_;
        if (at::functionalization::impl::isFunctionalTensor(self)) {
          self_ = at::functionalization::impl::from_functional_tensor(self);
        } else {
          self_ = self;
        }
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::as_strided_::call(self_, size, stride, storage_offset);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      
      bool has_symbolic_inputs = false;
      has_symbolic_inputs = has_symbolic_inputs | (std::any_of(size.begin(), size.end(), [=](auto& arg) { return arg.is_symbolic(); }));
      has_symbolic_inputs = has_symbolic_inputs | (std::any_of(stride.begin(), stride.end(), [=](auto& arg) { return arg.is_symbolic(); }));
      has_symbolic_inputs = has_symbolic_inputs | (storage_offset.has_value() ? (*storage_offset).is_symbolic() : false);
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views, size = size.vec(), stride = stride.vec(), storage_offset = storage_offset](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::as_strided::call(base, size, stride, storage_offset);
          } else {
            return at::_ops::as_strided_copy::call(base, size, stride, storage_offset);
          }
        },
        [inverse_return_mode = inverse_return_mode, size = size.vec(), stride = stride.vec(), storage_offset = storage_offset](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::as_strided_inverse(base, mutated_view, inverse_return_mode, size, stride, storage_offset);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::as_strided_::call(self_meta, size, stride, storage_offset);
      }
      // This function adds the above view meta to the current tensor and replays them off the base,
      // mutating the size/stride info of the current FunctionalTensorWrapper.
      // Because of this, we need to make sure to run the reference shape function above,
      // BEFORE doing this (otherwise we'll end up running the reference function using the wrong sizes/strides).
      at::functionalization::impl::mutate_view_meta(self, view_meta);
      // See Note [Propagating strides in the functionalization pass]
      // XLA/LTC don't implement the logic to propagate strides correctly, so we need to rely
      // on a reference implementation here (instead of relying on the output from the forward lambda
      // having the correct stride info)
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(self, reference_tensor_output);
      }
      return self;
    }

    at::Tensor _sparse_broadcast_to(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size) {
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::_sparse_broadcast_to::call(self_, size);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::_sparse_broadcast_to::call(self_meta, size);
      }
      at::Tensor tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {
          tmp_output = at::_ops::_sparse_broadcast_to::call(self_, size);
        } else {
          tmp_output = at::_ops::_sparse_broadcast_to_copy::call(self_, size);
        }
      }
      
      bool has_symbolic_inputs = false;
      
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views, size = size.vec()](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::_sparse_broadcast_to::call(base, size);
          } else {
            return at::_ops::_sparse_broadcast_to_copy::call(base, size);
          }
        },
        [inverse_return_mode = inverse_return_mode, size = size.vec()](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::_sparse_broadcast_to_inverse(base, mutated_view, inverse_return_mode, size);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs,
        /*is_multi_output=*/false,
        /*is_as_strided=*/false
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
      // See Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }
      return out;
    }

    at::Tensor diagonal(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::diagonal::call(self_, offset, dim1, dim2);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::diagonal::call(self_meta, offset, dim1, dim2);
      }
      at::Tensor tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {
          tmp_output = at::_ops::diagonal::call(self_, offset, dim1, dim2);
        } else {
          tmp_output = at::_ops::diagonal_copy::call(self_, offset, dim1, dim2);
        }
      }
      
      bool has_symbolic_inputs = false;
      
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views, offset = offset, dim1 = dim1, dim2 = dim2](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::diagonal::call(base, offset, dim1, dim2);
          } else {
            return at::_ops::diagonal_copy::call(base, offset, dim1, dim2);
          }
        },
        [inverse_return_mode = inverse_return_mode, offset = offset, dim1 = dim1, dim2 = dim2](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::diagonal_inverse(base, mutated_view, inverse_return_mode, offset, dim1, dim2);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs,
        /*is_multi_output=*/false,
        /*is_as_strided=*/false
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
      // See Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }
      return out;
    }

    at::Tensor expand(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, bool implicit) {
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::expand::call(self_, size, implicit);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::expand::call(self_meta, size, implicit);
      }
      at::Tensor tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {
          tmp_output = at::_ops::expand::call(self_, size, implicit);
        } else {
          tmp_output = at::_ops::expand_copy::call(self_, size, implicit);
        }
      }
      
      bool has_symbolic_inputs = false;
      has_symbolic_inputs = has_symbolic_inputs | (std::any_of(size.begin(), size.end(), [=](auto& arg) { return arg.is_symbolic(); }));
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views, size = size.vec(), implicit = implicit](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::expand::call(base, size, implicit);
          } else {
            return at::_ops::expand_copy::call(base, size, implicit);
          }
        },
        [inverse_return_mode = inverse_return_mode, size = size.vec(), implicit = implicit](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::expand_inverse(base, mutated_view, inverse_return_mode, size, implicit);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs,
        /*is_multi_output=*/false,
        /*is_as_strided=*/false
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
      // See Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }
      return out;
    }

    at::Tensor permute(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dims) {
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::permute::call(self_, dims);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::permute::call(self_meta, dims);
      }
      at::Tensor tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {
          tmp_output = at::_ops::permute::call(self_, dims);
        } else {
          tmp_output = at::_ops::permute_copy::call(self_, dims);
        }
      }
      
      bool has_symbolic_inputs = false;
      
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views, dims = dims.vec()](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::permute::call(base, dims);
          } else {
            return at::_ops::permute_copy::call(base, dims);
          }
        },
        [inverse_return_mode = inverse_return_mode, dims = dims.vec()](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::permute_inverse(base, mutated_view, inverse_return_mode, dims);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs,
        /*is_multi_output=*/false,
        /*is_as_strided=*/false
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
      // See  Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }
      return out;
    }

    at::Tensor _reshape_alias(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::_reshape_alias::call(self_, size, stride);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::_reshape_alias::call(self_meta, size, stride);
      }
      at::Tensor tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {
          tmp_output = at::_ops::_reshape_alias::call(self_, size, stride);
        } else {
          tmp_output = at::_ops::_reshape_alias_copy::call(self_, size, stride);
        }
      }
      
      bool has_symbolic_inputs = false;
      has_symbolic_inputs = has_symbolic_inputs | (std::any_of(size.begin(), size.end(), [=](auto& arg) { return arg.is_symbolic(); }));
      has_symbolic_inputs = has_symbolic_inputs | (std::any_of(stride.begin(), stride.end(), [=](auto& arg) { return arg.is_symbolic(); }));
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views, size = size.vec(), stride = stride.vec()](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::_reshape_alias::call(base, size, stride);
          } else {
            return at::_ops::_reshape_alias_copy::call(base, size, stride);
          }
        },
        [inverse_return_mode = inverse_return_mode, size = size.vec(), stride = stride.vec()](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::_reshape_alias_inverse(base, mutated_view, inverse_return_mode, size, stride);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs,
        /*is_multi_output=*/false,
        /*is_as_strided=*/false
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
      // See  Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }
      return out;
    }
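
    // A minimal sketch (values hypothetical) of the has_symbolic_inputs check used
    // above: every SymInt in the size/stride lists is inspected, and a single
    // symbolic entry is enough to mark the ViewMeta as symbolic.
    //
    //   std::vector<c10::SymInt> sizes = {2, 3};   // all concrete
    //   bool any_symbolic = std::any_of(sizes.begin(), sizes.end(),
    //                                   [](auto& s) { return s.is_symbolic(); });  // false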

    at::Tensor select_int(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::SymInt index) {
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::select_int::call(self_, dim, index);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::select_int::call(self_meta, dim, index);
      }
      at::Tensor tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {
          tmp_output = at::_ops::select_int::call(self_, dim, index);
        } else {
          tmp_output = at::_ops::select_copy_int::call(self_, dim, index);
        }
      }
      
      bool has_symbolic_inputs = false;
      has_symbolic_inputs = has_symbolic_inputs | (index.is_symbolic());
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views, dim = dim, index = index](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::select_int::call(base, dim, index);
          } else {
            return at::_ops::select_copy_int::call(base, dim, index);
          }
        },
        [inverse_return_mode = inverse_return_mode, dim = dim, index = index](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::select_int_inverse(base, mutated_view, inverse_return_mode, dim, index);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs,
        /*is_multi_output=*/false,
        /*is_as_strided=*/false
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
      // See  Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }
      return out;
    }

    at::Tensor detach(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::detach::call(self_);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::detach::call(self_meta);
      }
      at::Tensor tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {
          tmp_output = at::_ops::detach::call(self_);
        } else {
          tmp_output = at::_ops::detach_copy::call(self_);
        }
      }
      
      bool has_symbolic_inputs = false;
      
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::detach::call(base);
          } else {
            return at::_ops::detach_copy::call(base);
          }
        },
        [inverse_return_mode = inverse_return_mode](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::detach_inverse(base, mutated_view, inverse_return_mode);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs,
        /*is_multi_output=*/false,
        /*is_as_strided=*/false
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
      // See  Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }
      return out;
    }

    at::Tensor & detach_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::Tensor self_;
        if (at::functionalization::impl::isFunctionalTensor(self)) {
          self_ = at::functionalization::impl::from_functional_tensor(self);
        } else {
          self_ = self;
        }
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::detach_::call(self_);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      
      bool has_symbolic_inputs = false;
      
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::detach::call(base);
          } else {
            return at::_ops::detach_copy::call(base);
          }
        },
        [inverse_return_mode = inverse_return_mode](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::detach_inverse(base, mutated_view, inverse_return_mode);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::detach_::call(self_meta);
      }
      // This function adds the above view meta to the current tensor and replays it off the base,
      // mutating the size/stride info of the current FunctionalTensorWrapper.
      // Because of this, we need to make sure to run the reference shape function above,
      // BEFORE doing this (otherwise we'll end up running the reference function using the wrong sizes/strides)
      at::functionalization::impl::mutate_view_meta(self, view_meta);
      // See  Note [Propagating strides in the functionalization pass]
      // XLA/LTC don't implement the logic to propagate strides correctly, so we need to rely
      // on a reference implementation here (instead of relying on the output from the forward lambda
      // having the correct stride info)
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(self, reference_tensor_output);
      }
      return self;
    }
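
    // In-place view ops such as detach_ above do not create a new wrapper: the
    // ViewMeta is appended onto `self` via mutate_view_meta, and (for XLA/LTC) the
    // sizes/strides are then fixed up from the meta reference output. A rough usage
    // sketch, assuming `t` already wraps a FunctionalTensorWrapper:
    //
    //   at::Tensor t = /* functional tensor */;
    //   t.detach_();   // routes here; t's view metadata grows, its base storage is untouched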

    at::Tensor slice_Tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, ::std::optional<c10::SymInt> start, ::std::optional<c10::SymInt> end, c10::SymInt step) {
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::slice_Tensor::call(self_, dim, start, end, step);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::slice_Tensor::call(self_meta, dim, start, end, step);
      }
      at::Tensor tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {
          tmp_output = at::_ops::slice_Tensor::call(self_, dim, start, end, step);
        } else {
          tmp_output = at::_ops::slice_copy_Tensor::call(self_, dim, start, end, step);
        }
      }
      
      bool has_symbolic_inputs = false;
      has_symbolic_inputs = has_symbolic_inputs | (start.has_value() ? (*start).is_symbolic() : false);
      has_symbolic_inputs = has_symbolic_inputs | (end.has_value() ? (*end).is_symbolic() : false);
      has_symbolic_inputs = has_symbolic_inputs | (step.is_symbolic());
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views, dim = dim, start = start, end = end, step = step](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::slice_Tensor::call(base, dim, start, end, step);
          } else {
            return at::_ops::slice_copy_Tensor::call(base, dim, start, end, step);
          }
        },
        [inverse_return_mode = inverse_return_mode, dim = dim, start = start, end = end, step = step](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::slice_Tensor_inverse(base, mutated_view, inverse_return_mode, dim, start, end, step);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs,
        /*is_multi_output=*/false,
        /*is_as_strided=*/false
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
      // See  Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }
      return out;
    }
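
    // Optional SymInt arguments only contribute to has_symbolic_inputs when they are
    // present; a hypothetical unset bound keeps the cheaper non-symbolic path:
    //
    //   ::std::optional<c10::SymInt> start;                                // nullopt
    //   bool sym = start.has_value() ? (*start).is_symbolic() : false;    // false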

    at::Tensor slice_inverse(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t dim, ::std::optional<c10::SymInt> start, ::std::optional<c10::SymInt> end, c10::SymInt step) {
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor src_;
      if (at::functionalization::impl::isFunctionalTensor(src)) {
        
        src_ = at::functionalization::impl::from_functional_tensor(src);
      } else {
        src_ = src;
      }
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::slice_inverse::call(self_, src_, dim, start, end, step);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        auto src_meta = to_meta(src);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::slice_inverse::call(self_meta, src_meta, dim, start, end, step);
      }
      at::Tensor tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {
          tmp_output = at::_ops::slice_inverse::call(self_, src_, dim, start, end, step);
        } else {
          tmp_output = at::_ops::slice_scatter::call(self_, src_, dim, start, end, step);
        }
      }
      
      bool has_symbolic_inputs = false;
      has_symbolic_inputs = has_symbolic_inputs | (start.has_value() ? (*start).is_symbolic() : false);
      has_symbolic_inputs = has_symbolic_inputs | (end.has_value() ? (*end).is_symbolic() : false);
      has_symbolic_inputs = has_symbolic_inputs | (step.is_symbolic());
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views, src = src, dim = dim, start = start, end = end, step = step](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::slice_inverse::call(base, src, dim, start, end, step);
          } else {
            return at::_ops::slice_scatter::call(base, src, dim, start, end, step);
          }
        },
        [inverse_return_mode = inverse_return_mode, src = src, dim = dim, start = start, end = end, step = step](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::slice_inverse_inverse(base, mutated_view, inverse_return_mode, src, dim, start, end, step);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs,
        /*is_multi_output=*/false,
        /*is_as_strided=*/false
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
      // See  Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }
      return out;
    }
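
    // When reapply_views is false, slice_inverse above is computed via slice_scatter
    // (see both branches), so the result is a freshly allocated tensor rather than a
    // view of self_.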

    ::std::vector<at::Tensor> split_Tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt split_size, int64_t dim) {
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::split_Tensor::call(self_, split_size, dim);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      ::std::vector<at::Tensor> reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::split_Tensor::call(self_meta, split_size, dim);
      }
      ::std::vector<at::Tensor> tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {
          tmp_output = at::_ops::split_Tensor::call(self_, split_size, dim);
        } else {
          tmp_output = at::_ops::split_copy_Tensor::call(self_, split_size, dim);
        }
      }
      
      bool has_symbolic_inputs = false;
      has_symbolic_inputs = has_symbolic_inputs | (split_size.is_symbolic());
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views, split_size = split_size, dim = dim](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::split_Tensor::call(base, split_size, dim)[mutated_view_idx];
          } else {
            return at::_ops::split_copy_Tensor::call(base, split_size, dim)[mutated_view_idx];
          }
        },
        [inverse_return_mode = inverse_return_mode, split_size = split_size, dim = dim](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::split_Tensor_inverse(base, mutated_view, inverse_return_mode, mutated_view_idx, split_size, dim);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs,
        /*is_multi_output=*/true,
        /*is_as_strided=*/false
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
      // See  Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }
      return out;
    }
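
    // For multi-output views like split, the replay lambdas above take
    // mutated_view_idx so they can select the one chunk that was mutated. A
    // hypothetical replay with split_size == 2 over a base of shape [6] looks
    // roughly like:
    //
    //   auto chunks = at::_ops::split_copy_Tensor::call(base, /*split_size=*/2, /*dim=*/0);
    //   at::Tensor replayed = chunks[mutated_view_idx];   // e.g. chunks[1]
    //
    // The inverse writes that chunk back through split_Tensor_inverse with the same index.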

    ::std::vector<at::Tensor> split_with_sizes(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) {
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::split_with_sizes::call(self_, split_sizes, dim);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      ::std::vector<at::Tensor> reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::split_with_sizes::call(self_meta, split_sizes, dim);
      }
      ::std::vector<at::Tensor> tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {
          tmp_output = at::_ops::split_with_sizes::call(self_, split_sizes, dim);
        } else {
          tmp_output = at::_ops::split_with_sizes_copy::call(self_, split_sizes, dim);
        }
      }
      
      bool has_symbolic_inputs = false;
      has_symbolic_inputs = has_symbolic_inputs | (std::any_of(split_sizes.begin(), split_sizes.end(), [=](auto& arg) { return arg.is_symbolic(); }));
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views, split_sizes = split_sizes.vec(), dim = dim](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::split_with_sizes::call(base, split_sizes, dim)[mutated_view_idx];
          } else {
            return at::_ops::split_with_sizes_copy::call(base, split_sizes, dim)[mutated_view_idx];
          }
        },
        [inverse_return_mode = inverse_return_mode, split_sizes = split_sizes.vec(), dim = dim](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::split_with_sizes_inverse(base, mutated_view, inverse_return_mode, mutated_view_idx, split_sizes, dim);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs,
        /*is_multi_output=*/true,
        /*is_as_strided=*/false
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
      // See  Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }
      return out;
    }

    at::Tensor squeeze(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::squeeze::call(self_);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::squeeze::call(self_meta);
      }
      at::Tensor tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {
          tmp_output = at::_ops::squeeze::call(self_);
        } else {
          tmp_output = at::_ops::squeeze_copy::call(self_);
        }
      }
      
      bool has_symbolic_inputs = false;
      
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::squeeze::call(base);
          } else {
            return at::_ops::squeeze_copy::call(base);
          }
        },
        [inverse_return_mode = inverse_return_mode](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::squeeze_inverse(base, mutated_view, inverse_return_mode);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs,
        /*is_multi_output=*/false,
        /*is_as_strided=*/false
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
      // See  Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }
      return out;
    }

    at::Tensor & squeeze_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::Tensor self_;
        if (at::functionalization::impl::isFunctionalTensor(self)) {
          self_ = at::functionalization::impl::from_functional_tensor(self);
        } else {
          self_ = self;
        }
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::squeeze_::call(self_);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      
      bool has_symbolic_inputs = false;
      
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::squeeze::call(base);
          } else {
            return at::_ops::squeeze_copy::call(base);
          }
        },
        [inverse_return_mode = inverse_return_mode](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::squeeze_inverse(base, mutated_view, inverse_return_mode);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::squeeze_::call(self_meta);
      }
      // This function adds the above view meta to the current tensor and replays it off the base,
      // mutating the size/stride info of the current FunctionalTensorWrapper.
      // Because of this, we need to make sure to run the reference shape function above,
      // BEFORE doing this (otherwise we'll end up running the reference function using the wrong sizes/strides)
      at::functionalization::impl::mutate_view_meta(self, view_meta);
      // See  Note [Propagating strides in the functionalization pass]
      // XLA/LTC don't implement the logic to propagate strides correctly, so we need to rely
      // on a reference implementation here (instead of relying on the output from the forward lambda
      // having the correct stride info)
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(self, reference_tensor_output);
      }
      return self;
    }

    at::Tensor squeeze_dim(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::squeeze_dim::call(self_, dim);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::squeeze_dim::call(self_meta, dim);
      }
      at::Tensor tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {
          tmp_output = at::_ops::squeeze_dim::call(self_, dim);
        } else {
          tmp_output = at::_ops::squeeze_copy_dim::call(self_, dim);
        }
      }
      
      bool has_symbolic_inputs = false;
      
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views, dim = dim](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::squeeze_dim::call(base, dim);
          } else {
            return at::_ops::squeeze_copy_dim::call(base, dim);
          }
        },
        [inverse_return_mode = inverse_return_mode, dim = dim](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::squeeze_dim_inverse(base, mutated_view, inverse_return_mode, dim);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs,
        /*is_multi_output=*/false,
        /*is_as_strided=*/false
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
      // See  Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }
      return out;
    }

    at::Tensor & squeeze__dim(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim) {
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::Tensor self_;
        if (at::functionalization::impl::isFunctionalTensor(self)) {
          self_ = at::functionalization::impl::from_functional_tensor(self);
        } else {
          self_ = self;
        }
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::squeeze__dim::call(self_, dim);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      
      bool has_symbolic_inputs = false;
      
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views, dim = dim](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::squeeze_dim::call(base, dim);
          } else {
            return at::_ops::squeeze_copy_dim::call(base, dim);
          }
        },
        [inverse_return_mode = inverse_return_mode, dim = dim](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::squeeze_dim_inverse(base, mutated_view, inverse_return_mode, dim);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::squeeze__dim::call(self_meta, dim);
      }
      // This function adds the above view meta to the current tensor and replays it off the base,
      // mutating the size/stride info of the current FunctionalTensorWrapper.
      // Because of this, we need to make sure to run the reference shape function above,
      // BEFORE doing this (otherwise we'll end up running the reference function using the wrong sizes/strides)
      at::functionalization::impl::mutate_view_meta(self, view_meta);
      // See  Note [Propagating strides in the functionalization pass]
      // XLA/LTC don't implement the logic to propagate strides correctly, so we need to rely
      // on a reference implementation here (instead of relying on the output from the forward lambda
      // having the correct stride info)
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(self, reference_tensor_output);
      }
      return self;
    }

    at::Tensor squeeze_dims(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim) {
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::squeeze_dims::call(self_, dim);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::squeeze_dims::call(self_meta, dim);
      }
      at::Tensor tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {
          tmp_output = at::_ops::squeeze_dims::call(self_, dim);
        } else {
          tmp_output = at::_ops::squeeze_copy_dims::call(self_, dim);
        }
      }
      
      bool has_symbolic_inputs = false;
      
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views, dim = dim.vec()](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::squeeze_dims::call(base, dim);
          } else {
            return at::_ops::squeeze_copy_dims::call(base, dim);
          }
        },
        [inverse_return_mode = inverse_return_mode, dim = dim.vec()](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::squeeze_dims_inverse(base, mutated_view, inverse_return_mode, dim);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs,
        /*is_multi_output=*/false,
        /*is_as_strided=*/false
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
      // See  Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }
      return out;
    }

    at::Tensor & squeeze__dims(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::IntArrayRef dim) {
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::Tensor self_;
        if (at::functionalization::impl::isFunctionalTensor(self)) {
          self_ = at::functionalization::impl::from_functional_tensor(self);
        } else {
          self_ = self;
        }
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::squeeze__dims::call(self_, dim);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      
      bool has_symbolic_inputs = false;
      
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views, dim = dim.vec()](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::squeeze_dims::call(base, dim);
          } else {
            return at::_ops::squeeze_copy_dims::call(base, dim);
          }
        },
        [inverse_return_mode = inverse_return_mode, dim = dim.vec()](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::squeeze_dims_inverse(base, mutated_view, inverse_return_mode, dim);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::squeeze__dims::call(self_meta, dim);
      }
      // This function adds the above view meta to the current tensor and replays it off the base,
      // mutating the size/stride info of the current FunctionalTensorWrapper.
      // Because of this, we need to make sure to run the reference shape function above,
      // BEFORE doing this (otherwise we'll end up running the reference function using the wrong sizes/strides)
      at::functionalization::impl::mutate_view_meta(self, view_meta);
      // See  Note [Propagating strides in the functionalization pass]
      // XLA/LTC don't implement the logic to propagate strides correctly, so we need to rely
      // on a reference implementation here (instead of relying on the output from the forward lambda
      // having the correct stride info)
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(self, reference_tensor_output);
      }
      return self;
    }

    at::Tensor t(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::t::call(self_);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::t::call(self_meta);
      }
      at::Tensor tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {
          tmp_output = at::_ops::t::call(self_);
        } else {
          tmp_output = at::_ops::t_copy::call(self_);
        }
      }
      
      bool has_symbolic_inputs = false;
      
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::t::call(base);
          } else {
            return at::_ops::t_copy::call(base);
          }
        },
        [inverse_return_mode = inverse_return_mode](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::t_inverse(base, mutated_view, inverse_return_mode);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs,
        /*is_multi_output=*/false,
        /*is_as_strided=*/false
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
      // See  Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }
      return out;
    }

    at::Tensor & t_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::Tensor self_;
        if (at::functionalization::impl::isFunctionalTensor(self)) {
          self_ = at::functionalization::impl::from_functional_tensor(self);
        } else {
          self_ = self;
        }
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::t_::call(self_);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      
      bool has_symbolic_inputs = false;
      
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::t::call(base);
          } else {
            return at::_ops::t_copy::call(base);
          }
        },
        [inverse_return_mode = inverse_return_mode](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::t_inverse(base, mutated_view, inverse_return_mode);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::t_::call(self_meta);
      }
      // This function adds the above view meta to the current tensor and replays it off the base,
      // mutating the size/stride info of the current FunctionalTensorWrapper.
      // Because of this, we need to make sure to run the reference shape function above,
      // BEFORE doing this (otherwise we'll end up running the reference function using the wrong sizes/strides)
      at::functionalization::impl::mutate_view_meta(self, view_meta);
      // See  Note [Propagating strides in the functionalization pass]
      // XLA/LTC don't implement the logic to propagate strides correctly, so we need to rely
      // on a reference implementation here (instead of relying on the output from the forward lambda
      // having the correct stride info)
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(self, reference_tensor_output);
      }
      return self;
    }

    at::Tensor transpose_int(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim0, int64_t dim1) {
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::transpose_int::call(self_, dim0, dim1);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::transpose_int::call(self_meta, dim0, dim1);
      }
      at::Tensor tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {
          tmp_output = at::_ops::transpose_int::call(self_, dim0, dim1);
        } else {
          tmp_output = at::_ops::transpose_copy_int::call(self_, dim0, dim1);
        }
      }
      
      bool has_symbolic_inputs = false;
      
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views, dim0 = dim0, dim1 = dim1](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::transpose_int::call(base, dim0, dim1);
          } else {
            return at::_ops::transpose_copy_int::call(base, dim0, dim1);
          }
        },
        [inverse_return_mode = inverse_return_mode, dim0 = dim0, dim1 = dim1](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::transpose_int_inverse(base, mutated_view, inverse_return_mode, dim0, dim1);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs,
        /*is_multi_output=*/false,
        /*is_as_strided=*/false
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
      // See  Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }
      return out;
    }

    at::Tensor & transpose_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim0, int64_t dim1) {
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::Tensor self_;
        if (at::functionalization::impl::isFunctionalTensor(self)) {
          self_ = at::functionalization::impl::from_functional_tensor(self);
        } else {
          self_ = self;
        }
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::transpose_::call(self_, dim0, dim1);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      
      bool has_symbolic_inputs = false;
      
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views, dim0 = dim0, dim1 = dim1](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::transpose_int::call(base, dim0, dim1);
          } else {
            return at::_ops::transpose_copy_int::call(base, dim0, dim1);
          }
        },
        [inverse_return_mode = inverse_return_mode, dim0 = dim0, dim1 = dim1](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::transpose_int_inverse(base, mutated_view, inverse_return_mode, dim0, dim1);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::transpose_::call(self_meta, dim0, dim1);
      }
      // This function adds the above view meta to the current tensor and replays it off the base,
      // mutating the size/stride info of the current FunctionalTensorWrapper.
      // Because of this, we need to make sure to run the reference shape function above
      // BEFORE doing this (otherwise we'd end up running the reference function with the wrong sizes/strides).
      at::functionalization::impl::mutate_view_meta(self, view_meta);
      // See  Note [Propagating strides in the functionalization pass]
      // XLA/LTC don't implement the logic to propagate strides correctly, so we need to rely
      // on a reference implementation here (instead of relying on the output from the forward lambda
      // having the correct stride info)
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(self, reference_tensor_output);
      }
      return self;
    }
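    // In-place view ops such as transpose_ above do not allocate a new wrapper. Instead,
    // mutate_view_meta appends the ViewMeta to self's existing view stack and updates the
    // wrapper's size/stride metadata in place; the meta reference run (XLA/Lazy only) then
    // overwrites those sizes/strides with the reference values.
    //
    // A minimal usage sketch (assuming the caller wraps inputs explicitly; under normal use
    // the functionalization pass does this itself):
    //
    //   at::Tensor base = at::ones({2, 3});
    //   at::Tensor t = at::functionalization::impl::to_functional_tensor(base);
    //   t.transpose_(0, 1);  // dispatches here; records a transpose ViewMeta instead of
    //                        // mutating base, and t now reports sizes {3, 2}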

    at::Tensor _nested_view_from_buffer(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets) {
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor nested_size_;
      if (at::functionalization::impl::isFunctionalTensor(nested_size)) {
        
        nested_size_ = at::functionalization::impl::from_functional_tensor(nested_size);
      } else {
        nested_size_ = nested_size;
      }
      
      at::Tensor nested_strides_;
      if (at::functionalization::impl::isFunctionalTensor(nested_strides)) {
        
        nested_strides_ = at::functionalization::impl::from_functional_tensor(nested_strides);
      } else {
        nested_strides_ = nested_strides;
      }
      
      at::Tensor offsets_;
      if (at::functionalization::impl::isFunctionalTensor(offsets)) {
        
        offsets_ = at::functionalization::impl::from_functional_tensor(offsets);
      } else {
        offsets_ = offsets;
      }
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::_nested_view_from_buffer::call(self_, nested_size_, nested_strides_, offsets_);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        auto nested_size_meta = to_meta(nested_size);
        auto nested_strides_meta = to_meta(nested_strides);
        auto offsets_meta = to_meta(offsets);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::_nested_view_from_buffer::call(self_meta, nested_size_meta, nested_strides_meta, offsets_meta);
      }
      at::Tensor tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {
          tmp_output = at::_ops::_nested_view_from_buffer::call(self_, nested_size_, nested_strides_, offsets_);
        } else {
          tmp_output = at::_ops::_nested_view_from_buffer_copy::call(self_, nested_size_, nested_strides_, offsets_);
        }
      }
      
      bool has_symbolic_inputs = false;
      
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views, nested_size = nested_size, nested_strides = nested_strides, offsets = offsets](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::_nested_view_from_buffer::call(base, nested_size, nested_strides, offsets);
          } else {
            return at::_ops::_nested_view_from_buffer_copy::call(base, nested_size, nested_strides, offsets);
          }
        },
        [inverse_return_mode = inverse_return_mode, nested_size = nested_size, nested_strides = nested_strides, offsets = offsets](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::_nested_view_from_buffer_inverse(base, mutated_view, inverse_return_mode, nested_size, nested_strides, offsets);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs,
        /*is_multi_output=*/false,
        /*is_as_strided=*/false
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
      // See  Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }
      return out;
    }
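    // When a view op takes several tensor arguments (here nested_size, nested_strides and
    // offsets in addition to self), each one is unwrapped before the redispatch; the
    // ViewMeta lambdas capture the original arguments by value so the view can be replayed
    // off a different base later, outside this call frame.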

    at::Tensor _nested_view_from_jagged(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & offsets, const at::Tensor & dummy, const ::std::optional<at::Tensor> & lengths, int64_t ragged_idx, const ::std::optional<at::Tensor> & min_seqlen, const ::std::optional<at::Tensor> & max_seqlen) {
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      
      at::Tensor offsets_;
      if (at::functionalization::impl::isFunctionalTensor(offsets)) {
        
        offsets_ = at::functionalization::impl::from_functional_tensor(offsets);
      } else {
        offsets_ = offsets;
      }
      
      at::Tensor dummy_;
      if (at::functionalization::impl::isFunctionalTensor(dummy)) {
        
        dummy_ = at::functionalization::impl::from_functional_tensor(dummy);
      } else {
        dummy_ = dummy;
      }
      
      ::std::optional<at::Tensor> lengths_;
      if (at::functionalization::impl::isFunctionalTensor(lengths)) {
        
        lengths_ = at::functionalization::impl::from_functional_tensor(lengths);
      } else {
        lengths_ = lengths;
      }
      
      ::std::optional<at::Tensor> min_seqlen_;
      if (at::functionalization::impl::isFunctionalTensor(min_seqlen)) {
        
        min_seqlen_ = at::functionalization::impl::from_functional_tensor(min_seqlen);
      } else {
        min_seqlen_ = min_seqlen;
      }
      
      ::std::optional<at::Tensor> max_seqlen_;
      if (at::functionalization::impl::isFunctionalTensor(max_seqlen)) {
        
        max_seqlen_ = at::functionalization::impl::from_functional_tensor(max_seqlen);
      } else {
        max_seqlen_ = max_seqlen;
      }
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::_nested_view_from_jagged::call(self_, offsets_, dummy_, lengths_, ragged_idx, min_seqlen_, max_seqlen_);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        auto offsets_meta = to_meta(offsets);
        auto dummy_meta = to_meta(dummy);
        auto lengths_meta = to_meta(lengths);
        auto min_seqlen_meta = to_meta(min_seqlen);
        auto max_seqlen_meta = to_meta(max_seqlen);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::_nested_view_from_jagged::call(self_meta, offsets_meta, dummy_meta, lengths_meta, ragged_idx, min_seqlen_meta, max_seqlen_meta);
      }
      at::Tensor tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {
          tmp_output = at::_ops::_nested_view_from_jagged::call(self_, offsets_, dummy_, lengths_, ragged_idx, min_seqlen_, max_seqlen_);
        } else {
          tmp_output = at::_ops::_nested_view_from_jagged_copy::call(self_, offsets_, dummy_, lengths_, ragged_idx, min_seqlen_, max_seqlen_);
        }
      }
      
      bool has_symbolic_inputs = false;
      
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views, offsets = offsets, dummy = dummy, lengths = lengths, ragged_idx = ragged_idx, min_seqlen = min_seqlen, max_seqlen = max_seqlen](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::_nested_view_from_jagged::call(base, offsets, dummy, lengths, ragged_idx, min_seqlen, max_seqlen);
          } else {
            return at::_ops::_nested_view_from_jagged_copy::call(base, offsets, dummy, lengths, ragged_idx, min_seqlen, max_seqlen);
          }
        },
        [inverse_return_mode = inverse_return_mode, offsets = offsets, dummy = dummy, lengths = lengths, ragged_idx = ragged_idx, min_seqlen = min_seqlen, max_seqlen = max_seqlen](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::_nested_view_from_jagged_inverse(base, mutated_view, inverse_return_mode, offsets, dummy, lengths, ragged_idx, min_seqlen, max_seqlen);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs,
        /*is_multi_output=*/false,
        /*is_as_strided=*/false
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
      // See  Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }
      return out;
    }
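    // Optional tensor arguments (lengths, min_seqlen and max_seqlen above) go through the
    // ::std::optional<at::Tensor> overloads of isFunctionalTensor/from_functional_tensor,
    // so a std::nullopt passes through untouched and a wrapped tensor is unwrapped exactly
    // like a required argument.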

    at::Tensor _nested_get_values(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::_nested_get_values::call(self_);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::_nested_get_values::call(self_meta);
      }
      at::Tensor tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {
          tmp_output = at::_ops::_nested_get_values::call(self_);
        } else {
          tmp_output = at::_ops::_nested_get_values_copy::call(self_);
        }
      }
      
      bool has_symbolic_inputs = false;
      
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::_nested_get_values::call(base);
          } else {
            return at::_ops::_nested_get_values_copy::call(base);
          }
        },
        [inverse_return_mode = inverse_return_mode](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::_nested_get_values_inverse(base, mutated_view, inverse_return_mode);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs,
        /*is_multi_output=*/false,
        /*is_as_strided=*/false
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
      // See  Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }
      return out;
    }

    at::Tensor unsqueeze(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::unsqueeze::call(self_, dim);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::unsqueeze::call(self_meta, dim);
      }
      at::Tensor tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {
          tmp_output = at::_ops::unsqueeze::call(self_, dim);
        } else {
          tmp_output = at::_ops::unsqueeze_copy::call(self_, dim);
        }
      }
      
      bool has_symbolic_inputs = false;
      
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views, dim = dim](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::unsqueeze::call(base, dim);
          } else {
            return at::_ops::unsqueeze_copy::call(base, dim);
          }
        },
        [inverse_return_mode = inverse_return_mode, dim = dim](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::unsqueeze_inverse(base, mutated_view, inverse_return_mode, dim);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs,
        /*is_multi_output=*/false,
        /*is_as_strided=*/false
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
      // See  Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }
      return out;
    }

    at::Tensor & unsqueeze_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim) {
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::Tensor self_;
        if (at::functionalization::impl::isFunctionalTensor(self)) {
          self_ = at::functionalization::impl::from_functional_tensor(self);
        } else {
          self_ = self;
        }
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::unsqueeze_::call(self_, dim);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      
      bool has_symbolic_inputs = false;
      
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views, dim = dim](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::unsqueeze::call(base, dim);
          } else {
            return at::_ops::unsqueeze_copy::call(base, dim);
          }
        },
        [inverse_return_mode = inverse_return_mode, dim = dim](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::unsqueeze_inverse(base, mutated_view, inverse_return_mode, dim);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::unsqueeze_::call(self_meta, dim);
      }
      // This function adds the above view meta to the current tensor and replays it off the base,
      // mutating the size/stride info of the current FunctionalTensorWrapper.
      // Because of this, we need to make sure to run the reference shape function above
      // BEFORE doing this (otherwise we'd end up running the reference function with the wrong sizes/strides).
      at::functionalization::impl::mutate_view_meta(self, view_meta);
      // See  Note [Propagating strides in the functionalization pass]
      // XLA/LTC don't implement the logic to propagate strides correctly, so we need to rely
      // on a reference implementation here (instead of relying on the output from the forward lambda
      // having the correct stride info)
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(self, reference_tensor_output);
      }
      return self;
    }

    at::Tensor _indices(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::_indices::call(self_);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::_indices::call(self_meta);
      }
      at::Tensor tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {
          tmp_output = at::_ops::_indices::call(self_);
        } else {
          tmp_output = at::_ops::_indices_copy::call(self_);
        }
      }
      
      bool has_symbolic_inputs = false;
      
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::_indices::call(base);
          } else {
            return at::_ops::_indices_copy::call(base);
          }
        },
        [inverse_return_mode = inverse_return_mode](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::_indices_inverse(base, mutated_view, inverse_return_mode);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs,
        /*is_multi_output=*/false,
        /*is_as_strided=*/false
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
      // See  Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }
      return out;
    }

    at::Tensor _values(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::_values::call(self_);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::_values::call(self_meta);
      }
      at::Tensor tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {
          tmp_output = at::_ops::_values::call(self_);
        } else {
          tmp_output = at::_ops::_values_copy::call(self_);
        }
      }
      
      bool has_symbolic_inputs = false;
      
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::_values::call(base);
          } else {
            return at::_ops::_values_copy::call(base);
          }
        },
        [inverse_return_mode = inverse_return_mode](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::_values_inverse(base, mutated_view, inverse_return_mode);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs,
        /*is_multi_output=*/false,
        /*is_as_strided=*/false
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
      // See  Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }
      return out;
    }

    at::Tensor indices(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::indices::call(self_);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::indices::call(self_meta);
      }
      at::Tensor tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {
          tmp_output = at::_ops::indices::call(self_);
        } else {
          tmp_output = at::_ops::indices_copy::call(self_);
        }
      }
      
      bool has_symbolic_inputs = false;
      
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::indices::call(base);
          } else {
            return at::_ops::indices_copy::call(base);
          }
        },
        [inverse_return_mode = inverse_return_mode](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::indices_inverse(base, mutated_view, inverse_return_mode);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs,
        /*is_multi_output=*/false,
        /*is_as_strided=*/false
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
      // See  Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }
      return out;
    }

    at::Tensor values(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::values::call(self_);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::values::call(self_meta);
      }
      at::Tensor tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {
          tmp_output = at::_ops::values::call(self_);
        } else {
          tmp_output = at::_ops::values_copy::call(self_);
        }
      }
      
      bool has_symbolic_inputs = false;
      
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::values::call(base);
          } else {
            return at::_ops::values_copy::call(base);
          }
        },
        [inverse_return_mode = inverse_return_mode](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::values_inverse(base, mutated_view, inverse_return_mode);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs,
        /*is_multi_output=*/false,
        /*is_as_strided=*/false
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
      // See  Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }
      return out;
    }

    at::Tensor crow_indices(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::crow_indices::call(self_);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::crow_indices::call(self_meta);
      }
      at::Tensor tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {
          tmp_output = at::_ops::crow_indices::call(self_);
        } else {
          tmp_output = at::_ops::crow_indices_copy::call(self_);
        }
      }
      
      bool has_symbolic_inputs = false;
      
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::crow_indices::call(base);
          } else {
            return at::_ops::crow_indices_copy::call(base);
          }
        },
        [inverse_return_mode = inverse_return_mode](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::crow_indices_inverse(base, mutated_view, inverse_return_mode);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs,
        /*is_multi_output=*/false,
        /*is_as_strided=*/false
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
      // See  Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }
      return out;
    }

    at::Tensor col_indices(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::col_indices::call(self_);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::col_indices::call(self_meta);
      }
      at::Tensor tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {
          tmp_output = at::_ops::col_indices::call(self_);
        } else {
          tmp_output = at::_ops::col_indices_copy::call(self_);
        }
      }
      
      bool has_symbolic_inputs = false;
      
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::col_indices::call(base);
          } else {
            return at::_ops::col_indices_copy::call(base);
          }
        },
        [inverse_return_mode = inverse_return_mode](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::col_indices_inverse(base, mutated_view, inverse_return_mode);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs,
        /*is_multi_output=*/false,
        /*is_as_strided=*/false
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
      // See  Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }
      return out;
    }

    at::Tensor ccol_indices(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::ccol_indices::call(self_);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::ccol_indices::call(self_meta);
      }
      at::Tensor tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {
          tmp_output = at::_ops::ccol_indices::call(self_);
        } else {
          tmp_output = at::_ops::ccol_indices_copy::call(self_);
        }
      }
      
      bool has_symbolic_inputs = false;
      
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::ccol_indices::call(base);
          } else {
            return at::_ops::ccol_indices_copy::call(base);
          }
        },
        [inverse_return_mode = inverse_return_mode](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::ccol_indices_inverse(base, mutated_view, inverse_return_mode);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs,
        /*is_multi_output=*/false,
        /*is_as_strided=*/false
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
      // See  Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }
      return out;
    }

    at::Tensor row_indices(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::row_indices::call(self_);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::row_indices::call(self_meta);
      }
      at::Tensor tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {
          tmp_output = at::_ops::row_indices::call(self_);
        } else {
          tmp_output = at::_ops::row_indices_copy::call(self_);
        }
      }
      
      bool has_symbolic_inputs = false;
      
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::row_indices::call(base);
          } else {
            return at::_ops::row_indices_copy::call(base);
          }
        },
        [inverse_return_mode = inverse_return_mode](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::row_indices_inverse(base, mutated_view, inverse_return_mode);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs,
        /*is_multi_output=*/false,
        /*is_as_strided=*/false
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
      // See  Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }
      return out;
    }
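    // _indices, _values, indices, values, crow_indices, col_indices, ccol_indices and
    // row_indices above are the sparse/CSR accessor views; they take no extra arguments,
    // so their ViewMeta lambdas only need to capture reapply_views and the inverse mode.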

    ::std::vector<at::Tensor> unbind_int(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::unbind_int::call(self_, dim);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      ::std::vector<at::Tensor> reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::unbind_int::call(self_meta, dim);
      }
      ::std::vector<at::Tensor> tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {
          tmp_output = at::_ops::unbind_int::call(self_, dim);
        } else {
          tmp_output = at::_ops::unbind_copy_int::call(self_, dim);
        }
      }
      
      bool has_symbolic_inputs = false;
      
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views, dim = dim](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::unbind_int::call(base, dim)[mutated_view_idx];
          } else {
            return at::_ops::unbind_copy_int::call(base, dim)[mutated_view_idx];
          }
        },
        [inverse_return_mode = inverse_return_mode, dim = dim](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::unbind_int_inverse(base, mutated_view, inverse_return_mode, mutated_view_idx, dim);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs,
        /*is_multi_output=*/true,
        /*is_as_strided=*/false
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
      // See  Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }
      return out;
    }
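    // unbind_int is a multi-output view: tmp_output is a vector of tensors, the forward
    // lambda selects a single output with mutated_view_idx, and the inverse is told which
    // slice it is reconstructing via that same index. The ViewMeta is therefore built with
    // /*is_multi_output=*/true, unlike the single-output ops in this file.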

    at::Tensor lift_fresh(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::lift_fresh::call(self_);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::lift_fresh::call(self_meta);
      }
      at::Tensor tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {
          tmp_output = at::_ops::lift_fresh::call(self_);
        } else {
          tmp_output = at::_ops::lift_fresh_copy::call(self_);
        }
      }
      
      bool has_symbolic_inputs = false;
      
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::lift_fresh::call(base);
          } else {
            return at::_ops::lift_fresh_copy::call(base);
          }
        },
        [inverse_return_mode = inverse_return_mode](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::lift_fresh_inverse(base, mutated_view, inverse_return_mode);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs,
        /*is_multi_output=*/false,
        /*is_as_strided=*/false
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
      // See  Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }
      return out;
    }

    at::Tensor view(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size) {
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::view::call(self_, size);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::view::call(self_meta, size);
      }
      at::Tensor tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {
          tmp_output = at::_ops::view::call(self_, size);
        } else {
          tmp_output = at::_ops::view_copy::call(self_, size);
        }
      }
      
      bool has_symbolic_inputs = false;
      has_symbolic_inputs = has_symbolic_inputs | (std::any_of(size.begin(), size.end(), [=](auto& arg) { return arg.is_symbolic(); }));
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views, size = size.vec()](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::view::call(base, size);
          } else {
            return at::_ops::view_copy::call(base, size);
          }
        },
        [inverse_return_mode = inverse_return_mode, size = size.vec()](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::view_inverse(base, mutated_view, inverse_return_mode, size);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs,
        /*is_multi_output=*/false,
        /*is_as_strided=*/false
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
      // See  Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }
      return out;
    }
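    // view is one of the ops here that can receive symbolic shapes: has_symbolic_inputs is
    // computed from the SymIntArrayRef instead of being hard-coded to false, and the
    // lambdas capture size.vec() by value so the (possibly symbolic) sizes outlive the
    // original SymIntArrayRef.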

    at::Tensor view_dtype(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype) {
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::view_dtype::call(self_, dtype);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::view_dtype::call(self_meta, dtype);
      }
      at::Tensor tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {
          tmp_output = at::_ops::view_dtype::call(self_, dtype);
        } else {
          tmp_output = at::_ops::view_copy_dtype::call(self_, dtype);
        }
      }
      
      bool has_symbolic_inputs = false;
      
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views, dtype = dtype](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::view_dtype::call(base, dtype);
          } else {
            return at::_ops::view_copy_dtype::call(base, dtype);
          }
        },
        [inverse_return_mode = inverse_return_mode, dtype = dtype](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::view_dtype_inverse(base, mutated_view, inverse_return_mode, dtype);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs,
        /*is_multi_output=*/false,
        /*is_as_strided=*/false
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
      // See  Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }
      return out;
    }

    at::Tensor unfold(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) {
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::unfold::call(self_, dimension, size, step);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::unfold::call(self_meta, dimension, size, step);
      }
      at::Tensor tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {
          tmp_output = at::_ops::unfold::call(self_, dimension, size, step);
        } else {
          tmp_output = at::_ops::unfold_copy::call(self_, dimension, size, step);
        }
      }
      
      bool has_symbolic_inputs = false;
      
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views, dimension = dimension, size = size, step = step](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::unfold::call(base, dimension, size, step);
          } else {
            return at::_ops::unfold_copy::call(base, dimension, size, step);
          }
        },
        [inverse_return_mode = inverse_return_mode, dimension = dimension, size = size, step = step](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::unfold_inverse(base, mutated_view, inverse_return_mode, dimension, size, step);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs,
        /*is_multi_output=*/false,
        /*is_as_strided=*/false
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
      // See Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }
      return out;
    }

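    // Functionalization kernel for aten::alias; same unwrap / re-dispatch / ViewMeta pattern
    // as above, falling back to alias_copy when views are not being re-applied.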
    at::Tensor alias(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::alias::call(self_);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::alias::call(self_meta);
      }
      at::Tensor tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {
          tmp_output = at::_ops::alias::call(self_);
        } else {
          tmp_output = at::_ops::alias_copy::call(self_);
        }
      }
      
      bool has_symbolic_inputs = false;
      
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::alias::call(base);
          } else {
            return at::_ops::alias_copy::call(base);
          }
        },
        [inverse_return_mode = inverse_return_mode](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::alias_inverse(base, mutated_view, inverse_return_mode);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs,
        /*is_multi_output=*/false,
        /*is_as_strided=*/false
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
      // See Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }
      return out;
    }

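    // Functionalization kernel for the testing-only view op
    // aten::_test_autograd_multiple_dispatch_view, generated with the same ViewMeta pattern.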
    at::Tensor _test_autograd_multiple_dispatch_view(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
      
      at::Tensor self_;
      if (at::functionalization::impl::isFunctionalTensor(self)) {
        
        self_ = at::functionalization::impl::from_functional_tensor(self);
      } else {
        self_ = self;
      }
      if (!at::functionalization::impl::isFunctionalTensor(self)) {
        // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper.
        at::AutoDispatchSkipFunctionalize guard;
        return at::_ops::_test_autograd_multiple_dispatch_view::call(self_);
      }
      auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
      auto inverse_return_mode = (
          reapply_views ? at::functionalization::InverseReturnMode::ViewOrScatterInverse
            : at::functionalization::InverseReturnMode::NeverView
      );
      auto compute_reference_meta =
        self.key_set().has_backend(c10::BackendComponent::XLABit) ||
        self.key_set().has_backend(c10::BackendComponent::LazyBit);
      at::Tensor reference_tensor_output;
      if (compute_reference_meta && !disable_meta_reference()) {
        auto self_meta = to_meta(self);
        at::AutoDispatchSkipFunctionalize func_guard;
        c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch);
        reference_tensor_output = at::_ops::_test_autograd_multiple_dispatch_view::call(self_meta);
      }
      at::Tensor tmp_output;
      {
        at::AutoDispatchSkipFunctionalize guard;
        if (reapply_views) {
          tmp_output = at::_ops::_test_autograd_multiple_dispatch_view::call(self_);
        } else {
          tmp_output = at::_ops::_test_autograd_multiple_dispatch_view_copy::call(self_);
        }
      }
      
      bool has_symbolic_inputs = false;
      
      at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
        [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
          if (reapply_views) {
            return at::_ops::_test_autograd_multiple_dispatch_view::call(base);
          } else {
            return at::_ops::_test_autograd_multiple_dispatch_view_copy::call(base);
          }
        },
        [inverse_return_mode = inverse_return_mode](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
          return at::functionalization::FunctionalInverses::_test_autograd_multiple_dispatch_view_inverse(base, mutated_view, inverse_return_mode);
        },
        /*has_symbolic_inputs=*/has_symbolic_inputs,
        /*is_multi_output=*/false,
        /*is_as_strided=*/false
      );
      auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
      // See Note [Propagating strides in the functionalization pass]
      if (compute_reference_meta && !disable_meta_reference()) {
        at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
      }
      return out;
    }

}  // namespace functionalization

namespace {

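// Register the generated functionalization kernels under the Functionalize dispatch key.
// Each schema string names an aten overload (out-variant or in-place), and is mapped to the
// corresponding wrapper in the functionalization namespace defined earlier in this file.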
TORCH_LIBRARY_IMPL(aten, Functionalize, m) {
  m.impl("_new_zeros_with_same_feature_meta.out", TORCH_FN(functionalization::_new_zeros_with_same_feature_meta_out_out));
  m.impl("_cudnn_ctc_loss.out", TORCH_FN(functionalization::_cudnn_ctc_loss_out_out));
  m.impl("_cudnn_rnn_flatten_weight.out", TORCH_FN(functionalization::_cudnn_rnn_flatten_weight_out_out));
  m.impl("_cudnn_rnn.out", TORCH_FN(functionalization::_cudnn_rnn_out_out));
  m.impl("_cudnn_rnn_backward.out", TORCH_FN(functionalization::_cudnn_rnn_backward_out_out));
  m.impl("_cudnn_init_dropout_state.out", TORCH_FN(functionalization::_cudnn_init_dropout_state_out_out));
  m.impl("_fused_dropout.out", TORCH_FN(functionalization::_fused_dropout_out_out));
  m.impl("_masked_scale.out", TORCH_FN(functionalization::_masked_scale_out_out));
  m.impl("native_dropout.out", TORCH_FN(functionalization::native_dropout_out_out));
  m.impl("native_dropout_backward.out", TORCH_FN(functionalization::native_dropout_backward_out_out));
  m.impl("abs.out", TORCH_FN(functionalization::abs_out_out));
  m.impl("abs_", TORCH_FN(functionalization::abs_));
  m.impl("angle.out", TORCH_FN(functionalization::angle_out_out));
  m.impl("sgn.out", TORCH_FN(functionalization::sgn_out_out));
  m.impl("sgn_", TORCH_FN(functionalization::sgn_));
  m.impl("_conj_physical.out", TORCH_FN(functionalization::_conj_physical_out_out));
  m.impl("conj_physical.out", TORCH_FN(functionalization::conj_physical_out_out));
  m.impl("conj_physical_", TORCH_FN(functionalization::conj_physical_));
  m.impl("acos.out", TORCH_FN(functionalization::acos_out_out));
  m.impl("acos_", TORCH_FN(functionalization::acos_));
  m.impl("avg_pool1d.out", TORCH_FN(functionalization::avg_pool1d_out_out));
  m.impl("adaptive_avg_pool1d.out", TORCH_FN(functionalization::adaptive_avg_pool1d_out_out));
  m.impl("add.out", TORCH_FN(functionalization::add_out_out));
  m.impl("add_.Tensor", TORCH_FN(functionalization::add__Tensor));
  m.impl("_add_relu.out", TORCH_FN(functionalization::_add_relu_out_out));
  m.impl("_add_relu_.Tensor", TORCH_FN(functionalization::_add_relu__Tensor));
  m.impl("_add_relu.Scalar_out", TORCH_FN(functionalization::_add_relu_out_Scalar_out));
  m.impl("_add_relu_.Scalar", TORCH_FN(functionalization::_add_relu__Scalar));
  m.impl("add.Scalar_out", TORCH_FN(functionalization::add_out_Scalar_out));
  m.impl("add_.Scalar", TORCH_FN(functionalization::add__Scalar));
  m.impl("addmv.out", TORCH_FN(functionalization::addmv_out_out));
  m.impl("addmv_", TORCH_FN(functionalization::addmv_));
  m.impl("addr.out", TORCH_FN(functionalization::addr_out_out));
  m.impl("addr_", TORCH_FN(functionalization::addr_));
  m.impl("affine_grid_generator.out", TORCH_FN(functionalization::affine_grid_generator_out_out));
  m.impl("_test_functorch_fallback.out", TORCH_FN(functionalization::_test_functorch_fallback_out_out));
  m.impl("all.out", TORCH_FN(functionalization::all_out_out));
  m.impl("all.dims_out", TORCH_FN(functionalization::all_out_dims_out));
  m.impl("any.out", TORCH_FN(functionalization::any_out_out));
  m.impl("any.dims_out", TORCH_FN(functionalization::any_out_dims_out));
  m.impl("arange.out", TORCH_FN(functionalization::arange_out_out));
  m.impl("arange.start_out", TORCH_FN(functionalization::arange_out_start_out));
  m.impl("argmax.out", TORCH_FN(functionalization::argmax_out_out));
  m.impl("argmin.out", TORCH_FN(functionalization::argmin_out_out));
  m.impl("acosh.out", TORCH_FN(functionalization::acosh_out_out));
  m.impl("acosh_", TORCH_FN(functionalization::acosh_));
  m.impl("asinh.out", TORCH_FN(functionalization::asinh_out_out));
  m.impl("asinh_", TORCH_FN(functionalization::asinh_));
  m.impl("atanh.out", TORCH_FN(functionalization::atanh_out_out));
  m.impl("atanh_", TORCH_FN(functionalization::atanh_));
  m.impl("asin.out", TORCH_FN(functionalization::asin_out_out));
  m.impl("asin_", TORCH_FN(functionalization::asin_));
  m.impl("atan.out", TORCH_FN(functionalization::atan_out_out));
  m.impl("atan_", TORCH_FN(functionalization::atan_));
  m.impl("baddbmm.out", TORCH_FN(functionalization::baddbmm_out_out));
  m.impl("baddbmm_", TORCH_FN(functionalization::baddbmm_));
  m.impl("bartlett_window.out", TORCH_FN(functionalization::bartlett_window_out_out));
  m.impl("bartlett_window.periodic_out", TORCH_FN(functionalization::bartlett_window_out_periodic_out));
  m.impl("quantized_batch_norm.out", TORCH_FN(functionalization::quantized_batch_norm_out_out));
  m.impl("bernoulli.out", TORCH_FN(functionalization::bernoulli_out_out));
  m.impl("bernoulli.Tensor_out", TORCH_FN(functionalization::bernoulli_out_Tensor_out));
  m.impl("bernoulli_.Tensor", TORCH_FN(functionalization::bernoulli__Tensor));
  m.impl("bernoulli.float_out", TORCH_FN(functionalization::bernoulli_out_float_out));
  m.impl("bernoulli_.float", TORCH_FN(functionalization::bernoulli__float));
  m.impl("binary_cross_entropy.out", TORCH_FN(functionalization::binary_cross_entropy_out_out));
  m.impl("binary_cross_entropy_backward.grad_input", TORCH_FN(functionalization::binary_cross_entropy_backward_out_grad_input));
  m.impl("binary_cross_entropy_with_logits.out", TORCH_FN(functionalization::binary_cross_entropy_with_logits_out_out));
  m.impl("bincount.out", TORCH_FN(functionalization::bincount_out_out));
  m.impl("bitwise_not.out", TORCH_FN(functionalization::bitwise_not_out_out));
  m.impl("bitwise_not_", TORCH_FN(functionalization::bitwise_not_));
  m.impl("copysign.out", TORCH_FN(functionalization::copysign_out_out));
  m.impl("copysign_.Tensor", TORCH_FN(functionalization::copysign__Tensor));
  m.impl("copysign.Scalar_out", TORCH_FN(functionalization::copysign_out_Scalar_out));
  m.impl("copysign_.Scalar", TORCH_FN(functionalization::copysign__Scalar));
  m.impl("logical_not.out", TORCH_FN(functionalization::logical_not_out_out));
  m.impl("logical_not_", TORCH_FN(functionalization::logical_not_));
  m.impl("logical_xor.out", TORCH_FN(functionalization::logical_xor_out_out));
  m.impl("logical_xor_", TORCH_FN(functionalization::logical_xor_));
  m.impl("logical_and.out", TORCH_FN(functionalization::logical_and_out_out));
  m.impl("logical_and_", TORCH_FN(functionalization::logical_and_));
  m.impl("logical_or.out", TORCH_FN(functionalization::logical_or_out_out));
  m.impl("logical_or_", TORCH_FN(functionalization::logical_or_));
  m.impl("blackman_window.out", TORCH_FN(functionalization::blackman_window_out_out));
  m.impl("blackman_window.periodic_out", TORCH_FN(functionalization::blackman_window_out_periodic_out));
  m.impl("bmm.out", TORCH_FN(functionalization::bmm_out_out));
  m.impl("cat.out", TORCH_FN(functionalization::cat_out_out));
  m.impl("block_diag.out", TORCH_FN(functionalization::block_diag_out_out));
  m.impl("ceil.out", TORCH_FN(functionalization::ceil_out_out));
  m.impl("ceil_", TORCH_FN(functionalization::ceil_));
  m.impl("clamp.out", TORCH_FN(functionalization::clamp_out_out));
  m.impl("clamp_", TORCH_FN(functionalization::clamp_));
  m.impl("clamp.Tensor_out", TORCH_FN(functionalization::clamp_out_Tensor_out));
  m.impl("clamp_.Tensor", TORCH_FN(functionalization::clamp__Tensor));
  m.impl("clamp_max.out", TORCH_FN(functionalization::clamp_max_out_out));
  m.impl("clamp_max_", TORCH_FN(functionalization::clamp_max_));
  m.impl("clamp_max.Tensor_out", TORCH_FN(functionalization::clamp_max_out_Tensor_out));
  m.impl("clamp_max_.Tensor", TORCH_FN(functionalization::clamp_max__Tensor));
  m.impl("clamp_min.out", TORCH_FN(functionalization::clamp_min_out_out));
  m.impl("clamp_min_", TORCH_FN(functionalization::clamp_min_));
  m.impl("clamp_min.Tensor_out", TORCH_FN(functionalization::clamp_min_out_Tensor_out));
  m.impl("clamp_min_.Tensor", TORCH_FN(functionalization::clamp_min__Tensor));
  m.impl("complex.out", TORCH_FN(functionalization::complex_out_out));
  m.impl("polar.out", TORCH_FN(functionalization::polar_out_out));
  m.impl("constant_pad_nd.out", TORCH_FN(functionalization::constant_pad_nd_out_out));
  m.impl("convolution.out", TORCH_FN(functionalization::convolution_out_out));
  m.impl("convolution_backward.out", TORCH_FN(functionalization::convolution_backward_out_out));
  m.impl("convolution_overrideable.out", TORCH_FN(functionalization::convolution_overrideable_out_out));
  m.impl("convolution_backward_overrideable.out", TORCH_FN(functionalization::convolution_backward_overrideable_out_out));
  m.impl("_convolution.out", TORCH_FN(functionalization::_convolution_out_out));
  m.impl("conv_tbc.out", TORCH_FN(functionalization::conv_tbc_out_out));
  m.impl("copy.out", TORCH_FN(functionalization::copy_out_out));
  m.impl("copy_", TORCH_FN(functionalization::copy_));
  m.impl("_copy_from.out", TORCH_FN(functionalization::_copy_from_out_out));
  m.impl("_copy_from_and_resize.out", TORCH_FN(functionalization::_copy_from_and_resize_out_out));
  m.impl("cos.out", TORCH_FN(functionalization::cos_out_out));
  m.impl("cos_", TORCH_FN(functionalization::cos_));
  m.impl("cosh.out", TORCH_FN(functionalization::cosh_out_out));
  m.impl("cosh_", TORCH_FN(functionalization::cosh_));
  m.impl("count_nonzero.dim_IntList_out", TORCH_FN(functionalization::count_nonzero_out_dim_IntList_out));
  m.impl("count_nonzero.out", TORCH_FN(functionalization::count_nonzero_out_out));
  m.impl("cudnn_affine_grid_generator.out", TORCH_FN(functionalization::cudnn_affine_grid_generator_out_out));
  m.impl("cudnn_affine_grid_generator_backward.out", TORCH_FN(functionalization::cudnn_affine_grid_generator_backward_out_out));
  m.impl("cudnn_batch_norm.out", TORCH_FN(functionalization::cudnn_batch_norm_out_out));
  m.impl("cudnn_batch_norm_backward.out", TORCH_FN(functionalization::cudnn_batch_norm_backward_out_out));
  m.impl("cudnn_convolution.out", TORCH_FN(functionalization::cudnn_convolution_out_out));
  m.impl("cudnn_convolution_transpose.out", TORCH_FN(functionalization::cudnn_convolution_transpose_out_out));
  m.impl("_mps_convolution_transpose.out", TORCH_FN(functionalization::_mps_convolution_transpose_out_out));
  m.impl("mps_convolution_transpose_backward.out", TORCH_FN(functionalization::mps_convolution_transpose_backward_out_out));
  m.impl("cudnn_convolution_relu.out", TORCH_FN(functionalization::cudnn_convolution_relu_out_out));
  m.impl("cudnn_convolution_add_relu.out", TORCH_FN(functionalization::cudnn_convolution_add_relu_out_out));
  m.impl("cudnn_grid_sampler.out", TORCH_FN(functionalization::cudnn_grid_sampler_out_out));
  m.impl("cudnn_grid_sampler_backward.out", TORCH_FN(functionalization::cudnn_grid_sampler_backward_out_out));
  m.impl("cummax.out", TORCH_FN(functionalization::cummax_out_out));
  m.impl("cummin.out", TORCH_FN(functionalization::cummin_out_out));
  m.impl("cumprod.out", TORCH_FN(functionalization::cumprod_out_out));
  m.impl("cumprod_", TORCH_FN(functionalization::cumprod_));
  m.impl("cumsum.out", TORCH_FN(functionalization::cumsum_out_out));
  m.impl("cumsum_", TORCH_FN(functionalization::cumsum_));
  m.impl("_ctc_loss.out", TORCH_FN(functionalization::_ctc_loss_out_out));
  m.impl("_ctc_loss.Tensor_out", TORCH_FN(functionalization::_ctc_loss_out_Tensor_out));
  m.impl("_ctc_loss_backward.out", TORCH_FN(functionalization::_ctc_loss_backward_out_out));
  m.impl("diag_embed.out", TORCH_FN(functionalization::diag_embed_out_out));
  m.impl("diagonal_backward.out", TORCH_FN(functionalization::diagonal_backward_out_out));
  m.impl("div.out", TORCH_FN(functionalization::div_out_out));
  m.impl("div_.Tensor", TORCH_FN(functionalization::div__Tensor));
  m.impl("div.out_mode", TORCH_FN(functionalization::div_out_out_mode));
  m.impl("div_.Tensor_mode", TORCH_FN(functionalization::div__Tensor_mode));
  m.impl("div.Scalar_out", TORCH_FN(functionalization::div_out_Scalar_out));
  m.impl("div_.Scalar", TORCH_FN(functionalization::div__Scalar));
  m.impl("div.Scalar_mode_out", TORCH_FN(functionalization::div_out_Scalar_mode_out));
  m.impl("div_.Scalar_mode", TORCH_FN(functionalization::div__Scalar_mode));
  m.impl("dot.out", TORCH_FN(functionalization::dot_out_out));
  m.impl("vdot.out", TORCH_FN(functionalization::vdot_out_out));
  m.impl("embedding.out", TORCH_FN(functionalization::embedding_out_out));
  m.impl("embedding_dense_backward.out", TORCH_FN(functionalization::embedding_dense_backward_out_out));
  m.impl("embedding_renorm.out", TORCH_FN(functionalization::embedding_renorm_out_out));
  m.impl("embedding_renorm_", TORCH_FN(functionalization::embedding_renorm_));
  m.impl("_embedding_bag_forward_only.out", TORCH_FN(functionalization::_embedding_bag_forward_only_out_out));
  m.impl("_embedding_bag.out", TORCH_FN(functionalization::_embedding_bag_out_out));
  m.impl("_embedding_bag_dense_backward.out", TORCH_FN(functionalization::_embedding_bag_dense_backward_out_out));
  m.impl("_embedding_bag_per_sample_weights_backward.out", TORCH_FN(functionalization::_embedding_bag_per_sample_weights_backward_out_out));
  m.impl("empty.names_out", TORCH_FN(functionalization::empty_out_names_out));
  m.impl("empty_permuted.out", TORCH_FN(functionalization::empty_permuted_out_out));
  m.impl("new_empty.out", TORCH_FN(functionalization::new_empty_out_out));
  m.impl("new_empty_strided.out", TORCH_FN(functionalization::new_empty_strided_out_out));
  m.impl("new_full.out", TORCH_FN(functionalization::new_full_out_out));
  m.impl("new_zeros.out", TORCH_FN(functionalization::new_zeros_out_out));
  m.impl("new_ones.out", TORCH_FN(functionalization::new_ones_out_out));
  m.impl("_empty_affine_quantized.out", TORCH_FN(functionalization::_empty_affine_quantized_out_out));
  m.impl("_empty_per_channel_affine_quantized.out", TORCH_FN(functionalization::_empty_per_channel_affine_quantized_out_out));
  m.impl("_resize_output.out", TORCH_FN(functionalization::_resize_output_out_out));
  m.impl("_resize_output_", TORCH_FN(functionalization::_resize_output_));
  m.impl("empty_quantized.out", TORCH_FN(functionalization::empty_quantized_out_out));
  m.impl("empty_like.out", TORCH_FN(functionalization::empty_like_out_out));
  m.impl("empty_strided.out", TORCH_FN(functionalization::empty_strided_out_out));
  m.impl("erf.out", TORCH_FN(functionalization::erf_out_out));
  m.impl("erf_", TORCH_FN(functionalization::erf_));
  m.impl("erfc.out", TORCH_FN(functionalization::erfc_out_out));
  m.impl("erfc_", TORCH_FN(functionalization::erfc_));
  m.impl("exp.out", TORCH_FN(functionalization::exp_out_out));
  m.impl("exp_", TORCH_FN(functionalization::exp_));
  m.impl("exp2.out", TORCH_FN(functionalization::exp2_out_out));
  m.impl("exp2_", TORCH_FN(functionalization::exp2_));
  m.impl("expm1.out", TORCH_FN(functionalization::expm1_out_out));
  m.impl("expm1_", TORCH_FN(functionalization::expm1_));
  m.impl("eye.out", TORCH_FN(functionalization::eye_out_out));
  m.impl("eye.m_out", TORCH_FN(functionalization::eye_out_m_out));
  m.impl("fill.Scalar_out", TORCH_FN(functionalization::fill_out_Scalar_out));
  m.impl("fill_.Scalar", TORCH_FN(functionalization::fill__Scalar));
  m.impl("fill.Tensor_out", TORCH_FN(functionalization::fill_out_Tensor_out));
  m.impl("fill_.Tensor", TORCH_FN(functionalization::fill__Tensor));
  m.impl("floor.out", TORCH_FN(functionalization::floor_out_out));
  m.impl("floor_", TORCH_FN(functionalization::floor_));
  m.impl("floor_divide.out", TORCH_FN(functionalization::floor_divide_out_out));
  m.impl("floor_divide_.Tensor", TORCH_FN(functionalization::floor_divide__Tensor));
  m.impl("floor_divide.Scalar_out", TORCH_FN(functionalization::floor_divide_out_Scalar_out));
  m.impl("floor_divide_.Scalar", TORCH_FN(functionalization::floor_divide__Scalar));
  m.impl("frac.out", TORCH_FN(functionalization::frac_out_out));
  m.impl("frac_", TORCH_FN(functionalization::frac_));
  m.impl("full.names_out", TORCH_FN(functionalization::full_out_names_out));
  m.impl("full.out", TORCH_FN(functionalization::full_out_out));
  m.impl("full_like.out", TORCH_FN(functionalization::full_like_out_out));
  m.impl("from_file.out", TORCH_FN(functionalization::from_file_out_out));
  m.impl("gcd.out", TORCH_FN(functionalization::gcd_out_out));
  m.impl("gcd_", TORCH_FN(functionalization::gcd_));
  m.impl("lcm.out", TORCH_FN(functionalization::lcm_out_out));
  m.impl("lcm_", TORCH_FN(functionalization::lcm_));
  m.impl("grid_sampler_2d.out", TORCH_FN(functionalization::grid_sampler_2d_out_out));
  m.impl("grid_sampler_2d_backward.out", TORCH_FN(functionalization::grid_sampler_2d_backward_out_out));
  m.impl("_grid_sampler_2d_cpu_fallback.out", TORCH_FN(functionalization::_grid_sampler_2d_cpu_fallback_out_out));
  m.impl("grid_sampler_3d.out", TORCH_FN(functionalization::grid_sampler_3d_out_out));
  m.impl("grid_sampler_3d_backward.out", TORCH_FN(functionalization::grid_sampler_3d_backward_out_out));
  m.impl("hann_window.out", TORCH_FN(functionalization::hann_window_out_out));
  m.impl("hann_window.periodic_out", TORCH_FN(functionalization::hann_window_out_periodic_out));
  m.impl("hamming_window.out", TORCH_FN(functionalization::hamming_window_out_out));
  m.impl("hamming_window.periodic_out", TORCH_FN(functionalization::hamming_window_out_periodic_out));
  m.impl("hamming_window.periodic_alpha_out", TORCH_FN(functionalization::hamming_window_out_periodic_alpha_out));
  m.impl("hamming_window.periodic_alpha_beta_out", TORCH_FN(functionalization::hamming_window_out_periodic_alpha_beta_out));
  m.impl("kaiser_window.out", TORCH_FN(functionalization::kaiser_window_out_out));
  m.impl("kaiser_window.periodic_out", TORCH_FN(functionalization::kaiser_window_out_periodic_out));
  m.impl("kaiser_window.beta_out", TORCH_FN(functionalization::kaiser_window_out_beta_out));
  m.impl("native_group_norm.out", TORCH_FN(functionalization::native_group_norm_out_out));
  m.impl("native_group_norm_backward.out", TORCH_FN(functionalization::native_group_norm_backward_out_out));
  m.impl("_fft_r2c.out", TORCH_FN(functionalization::_fft_r2c_out_out));
  m.impl("_fft_c2r.out", TORCH_FN(functionalization::_fft_c2r_out_out));
  m.impl("_fft_c2c.out", TORCH_FN(functionalization::_fft_c2c_out_out));
  m.impl("index.Tensor_out", TORCH_FN(functionalization::index_out_Tensor_out));
  m.impl("index_copy.out", TORCH_FN(functionalization::index_copy_out_out));
  m.impl("index_copy_", TORCH_FN(functionalization::index_copy_));
  m.impl("index_put.out", TORCH_FN(functionalization::index_put_out_out));
  m.impl("index_put_", TORCH_FN(functionalization::index_put_));
  m.impl("_index_put_impl.out", TORCH_FN(functionalization::_index_put_impl_out_out));
  m.impl("_index_put_impl_", TORCH_FN(functionalization::_index_put_impl_));
  m.impl("isin.Tensor_Tensor_out", TORCH_FN(functionalization::isin_out_Tensor_Tensor_out));
  m.impl("isin.Tensor_Scalar_out", TORCH_FN(functionalization::isin_out_Tensor_Scalar_out));
  m.impl("isin.Scalar_Tensor_out", TORCH_FN(functionalization::isin_out_Scalar_Tensor_out));
  m.impl("isnan.out", TORCH_FN(functionalization::isnan_out_out));
  m.impl("kthvalue.values", TORCH_FN(functionalization::kthvalue_out_values));
  m.impl("native_layer_norm.out", TORCH_FN(functionalization::native_layer_norm_out_out));
  m.impl("native_layer_norm_backward.out", TORCH_FN(functionalization::native_layer_norm_backward_out_out));
  m.impl("nan_to_num.out", TORCH_FN(functionalization::nan_to_num_out_out));
  m.impl("nan_to_num_", TORCH_FN(functionalization::nan_to_num_));
  m.impl("linear.out", TORCH_FN(functionalization::linear_out_out));
  m.impl("linear_backward.out", TORCH_FN(functionalization::linear_backward_out_out));
  m.impl("mkldnn_linear.out", TORCH_FN(functionalization::mkldnn_linear_out_out));
  m.impl("mkldnn_linear_backward_input.out", TORCH_FN(functionalization::mkldnn_linear_backward_input_out_out));
  m.impl("mkldnn_linear_backward_weights.out", TORCH_FN(functionalization::mkldnn_linear_backward_weights_out_out));
  m.impl("mkldnn_linear_backward.out", TORCH_FN(functionalization::mkldnn_linear_backward_out_out));
  m.impl("linspace.out", TORCH_FN(functionalization::linspace_out_out));
  m.impl("linspace.Tensor_Tensor_out", TORCH_FN(functionalization::linspace_out_Tensor_Tensor_out));
  m.impl("linspace.Tensor_Scalar_out", TORCH_FN(functionalization::linspace_out_Tensor_Scalar_out));
  m.impl("linspace.Scalar_Tensor_out", TORCH_FN(functionalization::linspace_out_Scalar_Tensor_out));
  m.impl("log.out", TORCH_FN(functionalization::log_out_out));
  m.impl("log_", TORCH_FN(functionalization::log_));
  m.impl("log10.out", TORCH_FN(functionalization::log10_out_out));
  m.impl("log10_", TORCH_FN(functionalization::log10_));
  m.impl("log1p.out", TORCH_FN(functionalization::log1p_out_out));
  m.impl("log1p_", TORCH_FN(functionalization::log1p_));
  m.impl("log2.out", TORCH_FN(functionalization::log2_out_out));
  m.impl("log2_", TORCH_FN(functionalization::log2_));
  m.impl("logaddexp.out", TORCH_FN(functionalization::logaddexp_out_out));
  m.impl("logaddexp2.out", TORCH_FN(functionalization::logaddexp2_out_out));
  m.impl("xlogy.OutTensor", TORCH_FN(functionalization::xlogy_out_OutTensor));
  m.impl("xlogy_.Tensor", TORCH_FN(functionalization::xlogy__Tensor));
  m.impl("xlogy.OutScalar_Self", TORCH_FN(functionalization::xlogy_out_OutScalar_Self));
  m.impl("xlogy.OutScalar_Other", TORCH_FN(functionalization::xlogy_out_OutScalar_Other));
  m.impl("xlogy_.Scalar_Other", TORCH_FN(functionalization::xlogy__Scalar_Other));
  m.impl("logspace.out", TORCH_FN(functionalization::logspace_out_out));
  m.impl("logspace.Tensor_Tensor_out", TORCH_FN(functionalization::logspace_out_Tensor_Tensor_out));
  m.impl("logspace.Tensor_Scalar_out", TORCH_FN(functionalization::logspace_out_Tensor_Scalar_out));
  m.impl("logspace.Scalar_Tensor_out", TORCH_FN(functionalization::logspace_out_Scalar_Tensor_out));
  m.impl("log_softmax.int_out", TORCH_FN(functionalization::log_softmax_out_int_out));
  m.impl("_log_softmax.out", TORCH_FN(functionalization::_log_softmax_out_out));
  m.impl("_log_softmax_backward_data.out", TORCH_FN(functionalization::_log_softmax_backward_data_out_out));
  m.impl("_logcumsumexp.out", TORCH_FN(functionalization::_logcumsumexp_out_out));
  m.impl("logcumsumexp.out", TORCH_FN(functionalization::logcumsumexp_out_out));
  m.impl("logsumexp.out", TORCH_FN(functionalization::logsumexp_out_out));
  m.impl("matmul_backward.out", TORCH_FN(functionalization::matmul_backward_out_out));
  m.impl("_aminmax.out", TORCH_FN(functionalization::_aminmax_out_out));
  m.impl("_aminmax.dim_out", TORCH_FN(functionalization::_aminmax_out_dim_out));
  m.impl("aminmax.out", TORCH_FN(functionalization::aminmax_out_out));
  m.impl("_compute_linear_combination.out", TORCH_FN(functionalization::_compute_linear_combination_out_out));
  m.impl("max.dim_max", TORCH_FN(functionalization::max_out_dim_max));
  m.impl("amax.out", TORCH_FN(functionalization::amax_out_out));
  m.impl("max_pool2d_backward.out", TORCH_FN(functionalization::max_pool2d_backward_out_out));
  m.impl("mkldnn_max_pool2d.out", TORCH_FN(functionalization::mkldnn_max_pool2d_out_out));
  m.impl("mkldnn_max_pool2d_backward.out", TORCH_FN(functionalization::mkldnn_max_pool2d_backward_out_out));
  m.impl("mkldnn_max_pool3d.out", TORCH_FN(functionalization::mkldnn_max_pool3d_out_out));
  m.impl("mkldnn_max_pool3d_backward.out", TORCH_FN(functionalization::mkldnn_max_pool3d_backward_out_out));
  m.impl("quantized_max_pool1d.out", TORCH_FN(functionalization::quantized_max_pool1d_out_out));
  m.impl("quantized_max_pool2d.out", TORCH_FN(functionalization::quantized_max_pool2d_out_out));
  m.impl("quantized_max_pool3d.out", TORCH_FN(functionalization::quantized_max_pool3d_out_out));
  m.impl("mean.dtype_out", TORCH_FN(functionalization::mean_out_dtype_out));
  m.impl("mean.out", TORCH_FN(functionalization::mean_out_out));
  m.impl("median.out", TORCH_FN(functionalization::median_out_out));
  m.impl("median.dim_values", TORCH_FN(functionalization::median_out_dim_values));
  m.impl("nanmedian.out", TORCH_FN(functionalization::nanmedian_out_out));
  m.impl("nanmedian.dim_values", TORCH_FN(functionalization::nanmedian_out_dim_values));
  m.impl("min.dim_min", TORCH_FN(functionalization::min_out_dim_min));
  m.impl("amin.out", TORCH_FN(functionalization::amin_out_out));
  m.impl("_mps_convolution.out", TORCH_FN(functionalization::_mps_convolution_out_out));
  m.impl("mps_convolution_backward.out", TORCH_FN(functionalization::mps_convolution_backward_out_out));
  m.impl("mkldnn_convolution.out", TORCH_FN(functionalization::mkldnn_convolution_out_out));
  m.impl("mkldnn_rnn_layer.out", TORCH_FN(functionalization::mkldnn_rnn_layer_out_out));
  m.impl("mkldnn_rnn_layer_backward.out", TORCH_FN(functionalization::mkldnn_rnn_layer_backward_out_out));
  m.impl("miopen_batch_norm.out", TORCH_FN(functionalization::miopen_batch_norm_out_out));
  m.impl("miopen_batch_norm_backward.out", TORCH_FN(functionalization::miopen_batch_norm_backward_out_out));
  m.impl("miopen_convolution.out", TORCH_FN(functionalization::miopen_convolution_out_out));
  m.impl("miopen_convolution_transpose.out", TORCH_FN(functionalization::miopen_convolution_transpose_out_out));
  m.impl("miopen_depthwise_convolution.out", TORCH_FN(functionalization::miopen_depthwise_convolution_out_out));
  m.impl("miopen_rnn.out", TORCH_FN(functionalization::miopen_rnn_out_out));
  m.impl("miopen_rnn_backward.out", TORCH_FN(functionalization::miopen_rnn_backward_out_out));
  m.impl("mm.out", TORCH_FN(functionalization::mm_out_out));
  m.impl("_int_mm.out", TORCH_FN(functionalization::_int_mm_out_out));
  m.impl("_sparse_sparse_matmul.out", TORCH_FN(functionalization::_sparse_sparse_matmul_out_out));
  m.impl("mode.values", TORCH_FN(functionalization::mode_out_values));
  m.impl("mul.out", TORCH_FN(functionalization::mul_out_out));
  m.impl("mul_.Tensor", TORCH_FN(functionalization::mul__Tensor));
  m.impl("mul.Scalar_out", TORCH_FN(functionalization::mul_out_Scalar_out));
  m.impl("mul_.Scalar", TORCH_FN(functionalization::mul__Scalar));
  m.impl("mv.out", TORCH_FN(functionalization::mv_out_out));
  m.impl("mvlgamma.out", TORCH_FN(functionalization::mvlgamma_out_out));
  m.impl("mvlgamma_", TORCH_FN(functionalization::mvlgamma_));
  m.impl("narrow_copy.out", TORCH_FN(functionalization::narrow_copy_out_out));
  m.impl("native_batch_norm.out", TORCH_FN(functionalization::native_batch_norm_out_out));
  m.impl("_native_batch_norm_legit.out", TORCH_FN(functionalization::_native_batch_norm_legit_out_out));
  m.impl("_native_batch_norm_legit", TORCH_FN(functionalization::_native_batch_norm_legit));
  m.impl("_native_batch_norm_legit_no_training.out", TORCH_FN(functionalization::_native_batch_norm_legit_no_training_out_out));
  m.impl("_native_batch_norm_legit.no_stats_out", TORCH_FN(functionalization::_native_batch_norm_legit_out_no_stats_out));
  m.impl("batch_norm_stats.out", TORCH_FN(functionalization::batch_norm_stats_out_out));
  m.impl("batch_norm_elemt.out", TORCH_FN(functionalization::batch_norm_elemt_out_out));
  m.impl("batch_norm_gather_stats.out", TORCH_FN(functionalization::batch_norm_gather_stats_out_out));
  m.impl("batch_norm_gather_stats_with_counts.out", TORCH_FN(functionalization::batch_norm_gather_stats_with_counts_out_out));
  m.impl("native_batch_norm_backward.out", TORCH_FN(functionalization::native_batch_norm_backward_out_out));
  m.impl("batch_norm_backward_reduce.out", TORCH_FN(functionalization::batch_norm_backward_reduce_out_out));
  m.impl("batch_norm_backward_elemt.out", TORCH_FN(functionalization::batch_norm_backward_elemt_out_out));
  m.impl("batch_norm_update_stats.out", TORCH_FN(functionalization::batch_norm_update_stats_out_out));
  m.impl("_nnpack_spatial_convolution.out", TORCH_FN(functionalization::_nnpack_spatial_convolution_out_out));
  m.impl("ones.names_out", TORCH_FN(functionalization::ones_out_names_out));
  m.impl("ones.out", TORCH_FN(functionalization::ones_out_out));
  m.impl("ones_like.out", TORCH_FN(functionalization::ones_like_out_out));
  m.impl("_euclidean_dist.out", TORCH_FN(functionalization::_euclidean_dist_out_out));
  m.impl("_cdist_forward.out", TORCH_FN(functionalization::_cdist_forward_out_out));
  m.impl("_cdist_backward.out", TORCH_FN(functionalization::_cdist_backward_out_out));
  m.impl("_pdist_forward.out", TORCH_FN(functionalization::_pdist_forward_out_out));
  m.impl("_pdist_backward.out", TORCH_FN(functionalization::_pdist_backward_out_out));
  m.impl("pixel_shuffle.out", TORCH_FN(functionalization::pixel_shuffle_out_out));
  m.impl("pixel_unshuffle.out", TORCH_FN(functionalization::pixel_unshuffle_out_out));
  m.impl("channel_shuffle.out", TORCH_FN(functionalization::channel_shuffle_out_out));
  m.impl("_pin_memory.out", TORCH_FN(functionalization::_pin_memory_out_out));
  m.impl("rad2deg.out", TORCH_FN(functionalization::rad2deg_out_out));
  m.impl("rad2deg_", TORCH_FN(functionalization::rad2deg_));
  m.impl("deg2rad.out", TORCH_FN(functionalization::deg2rad_out_out));
  m.impl("deg2rad_", TORCH_FN(functionalization::deg2rad_));
  m.impl("scalar_tensor.out", TORCH_FN(functionalization::scalar_tensor_out_out));
  m.impl("rand.names_out", TORCH_FN(functionalization::rand_out_names_out));
  m.impl("rand.generator_with_names_out", TORCH_FN(functionalization::rand_out_generator_with_names_out));
  m.impl("rand.out", TORCH_FN(functionalization::rand_out_out));
  m.impl("rand_like.out", TORCH_FN(functionalization::rand_like_out_out));
  m.impl("randint.out", TORCH_FN(functionalization::randint_out_out));
  m.impl("randint.generator_out", TORCH_FN(functionalization::randint_out_generator_out));
  m.impl("randint.low_out", TORCH_FN(functionalization::randint_out_low_out));
  m.impl("randint.low_generator_out", TORCH_FN(functionalization::randint_out_low_generator_out));
  m.impl("randint_like.out", TORCH_FN(functionalization::randint_like_out_out));
  m.impl("randint_like.low_dtype_out", TORCH_FN(functionalization::randint_like_out_low_dtype_out));
  m.impl("randn.names_out", TORCH_FN(functionalization::randn_out_names_out));
  m.impl("randn.generator_with_names_out", TORCH_FN(functionalization::randn_out_generator_with_names_out));
  m.impl("randn_like.out", TORCH_FN(functionalization::randn_like_out_out));
  m.impl("randperm.out", TORCH_FN(functionalization::randperm_out_out));
  m.impl("randperm.generator_out", TORCH_FN(functionalization::randperm_out_generator_out));
  m.impl("range.out", TORCH_FN(functionalization::range_out_out));
  m.impl("range.out_", TORCH_FN(functionalization::range_out_out_));
  m.impl("reciprocal.out", TORCH_FN(functionalization::reciprocal_out_out));
  m.impl("reciprocal_", TORCH_FN(functionalization::reciprocal_));
  m.impl("neg.out", TORCH_FN(functionalization::neg_out_out));
  m.impl("neg_", TORCH_FN(functionalization::neg_));
  m.impl("repeat.out", TORCH_FN(functionalization::repeat_out_out));
  m.impl("repeat_interleave.Tensor_out", TORCH_FN(functionalization::repeat_interleave_out_Tensor_out));
  m.impl("_mkldnn_reshape.out", TORCH_FN(functionalization::_mkldnn_reshape_out_out));
  m.impl("round.out", TORCH_FN(functionalization::round_out_out));
  m.impl("round_", TORCH_FN(functionalization::round_));
  m.impl("round.decimals_out", TORCH_FN(functionalization::round_out_decimals_out));
  m.impl("round_.decimals", TORCH_FN(functionalization::round__decimals));
  m.impl("relu.out", TORCH_FN(functionalization::relu_out_out));
  m.impl("relu_", TORCH_FN(functionalization::relu_));
  m.impl("gelu.out", TORCH_FN(functionalization::gelu_out_out));
  m.impl("gelu_", TORCH_FN(functionalization::gelu_));
  m.impl("gelu_backward.grad_input", TORCH_FN(functionalization::gelu_backward_out_grad_input));
  m.impl("hardshrink.out", TORCH_FN(functionalization::hardshrink_out_out));
  m.impl("hardshrink_backward.grad_input", TORCH_FN(functionalization::hardshrink_backward_out_grad_input));
  m.impl("rsqrt.out", TORCH_FN(functionalization::rsqrt_out_out));
  m.impl("rsqrt_", TORCH_FN(functionalization::rsqrt_));
  m.impl("select_backward.out", TORCH_FN(functionalization::select_backward_out_out));
  m.impl("celu.out", TORCH_FN(functionalization::celu_out_out));
  m.impl("celu_", TORCH_FN(functionalization::celu_));
  m.impl("silu.out", TORCH_FN(functionalization::silu_out_out));
  m.impl("silu_", TORCH_FN(functionalization::silu_));
  m.impl("silu_backward.grad_input", TORCH_FN(functionalization::silu_backward_out_grad_input));
  m.impl("mish.out", TORCH_FN(functionalization::mish_out_out));
  m.impl("mish_", TORCH_FN(functionalization::mish_));
  m.impl("sigmoid.out", TORCH_FN(functionalization::sigmoid_out_out));
  m.impl("sigmoid_", TORCH_FN(functionalization::sigmoid_));
  m.impl("logit.out", TORCH_FN(functionalization::logit_out_out));
  m.impl("logit_", TORCH_FN(functionalization::logit_));
  m.impl("sin.out", TORCH_FN(functionalization::sin_out_out));
  m.impl("sin_", TORCH_FN(functionalization::sin_));
  m.impl("sinc.out", TORCH_FN(functionalization::sinc_out_out));
  m.impl("sinc_", TORCH_FN(functionalization::sinc_));
  m.impl("sinh.out", TORCH_FN(functionalization::sinh_out_out));
  m.impl("sinh_", TORCH_FN(functionalization::sinh_));
  m.impl("slice_backward.out", TORCH_FN(functionalization::slice_backward_out_out));
  m.impl("slice_scatter.out", TORCH_FN(functionalization::slice_scatter_out_out));
  m.impl("select_scatter.out", TORCH_FN(functionalization::select_scatter_out_out));
  m.impl("diagonal_scatter.out", TORCH_FN(functionalization::diagonal_scatter_out_out));
  m.impl("as_strided_scatter.out", TORCH_FN(functionalization::as_strided_scatter_out_out));
  m.impl("softmax.int_out", TORCH_FN(functionalization::softmax_out_int_out));
  m.impl("_softmax.out", TORCH_FN(functionalization::_softmax_out_out));
  m.impl("_softmax_backward_data.out", TORCH_FN(functionalization::_softmax_backward_data_out_out));
  m.impl("unsafe_split.Tensor_out", TORCH_FN(functionalization::unsafe_split_out_Tensor_out));
  m.impl("unsafe_split_with_sizes.out", TORCH_FN(functionalization::unsafe_split_with_sizes_out_out));
  m.impl("sspaddmm.out", TORCH_FN(functionalization::sspaddmm_out_out));
  m.impl("_chunk_cat.out", TORCH_FN(functionalization::_chunk_cat_out_out));
  m.impl("stack.out", TORCH_FN(functionalization::stack_out_out));
  m.impl("_stack.out", TORCH_FN(functionalization::_stack_out_out));
  m.impl("sum.out", TORCH_FN(functionalization::sum_out_out));
  m.impl("sum.IntList_out", TORCH_FN(functionalization::sum_out_IntList_out));
  m.impl("nansum.out", TORCH_FN(functionalization::nansum_out_out));
  m.impl("sqrt.out", TORCH_FN(functionalization::sqrt_out_out));
  m.impl("sqrt_", TORCH_FN(functionalization::sqrt_));
  m.impl("std.correction_out", TORCH_FN(functionalization::std_out_correction_out));
  m.impl("std_mean.correction_out", TORCH_FN(functionalization::std_mean_out_correction_out));
  m.impl("prod.out", TORCH_FN(functionalization::prod_out_out));
  m.impl("prod.int_out", TORCH_FN(functionalization::prod_out_int_out));
  m.impl("tan.out", TORCH_FN(functionalization::tan_out_out));
  m.impl("tan_", TORCH_FN(functionalization::tan_));
  m.impl("tanh.out", TORCH_FN(functionalization::tanh_out_out));
  m.impl("tanh_", TORCH_FN(functionalization::tanh_));
  m.impl("threshold.out", TORCH_FN(functionalization::threshold_out_out));
  m.impl("threshold_", TORCH_FN(functionalization::threshold_));
  m.impl("threshold_backward.grad_input", TORCH_FN(functionalization::threshold_backward_out_grad_input));
  m.impl("_mkldnn_transpose.out", TORCH_FN(functionalization::_mkldnn_transpose_out_out));
  m.impl("_mkldnn_transpose_", TORCH_FN(functionalization::_mkldnn_transpose_));
  m.impl("flip.out", TORCH_FN(functionalization::flip_out_out));
  m.impl("roll.out", TORCH_FN(functionalization::roll_out_out));
  m.impl("rot90.out", TORCH_FN(functionalization::rot90_out_out));
  m.impl("_transform_bias_rescale_qkv.out", TORCH_FN(functionalization::_transform_bias_rescale_qkv_out_out));
  m.impl("_nested_tensor_from_mask.out", TORCH_FN(functionalization::_nested_tensor_from_mask_out_out));
  m.impl("_nested_from_padded.out", TORCH_FN(functionalization::_nested_from_padded_out_out));
  m.impl("_nested_tensor_size.out", TORCH_FN(functionalization::_nested_tensor_size_out_out));
  m.impl("_nested_tensor_strides.out", TORCH_FN(functionalization::_nested_tensor_strides_out_out));
  m.impl("_nested_tensor_storage_offsets.out", TORCH_FN(functionalization::_nested_tensor_storage_offsets_out_out));
  m.impl("_nested_from_padded_and_nested_example.out", TORCH_FN(functionalization::_nested_from_padded_and_nested_example_out_out));
  m.impl("_nested_view_from_buffer_copy.out", TORCH_FN(functionalization::_nested_view_from_buffer_copy_out_out));
  m.impl("_nested_view_from_jagged_copy.out", TORCH_FN(functionalization::_nested_view_from_jagged_copy_out_out));
  m.impl("_nested_get_values_copy.out", TORCH_FN(functionalization::_nested_get_values_copy_out_out));
  m.impl("_trilinear.out", TORCH_FN(functionalization::_trilinear_out_out));
  m.impl("trunc.out", TORCH_FN(functionalization::trunc_out_out));
  m.impl("trunc_", TORCH_FN(functionalization::trunc_));
  m.impl("_unique.out", TORCH_FN(functionalization::_unique_out_out));
  m.impl("unique_dim.out", TORCH_FN(functionalization::unique_dim_out_out));
  m.impl("unique_consecutive.out", TORCH_FN(functionalization::unique_consecutive_out_out));
  m.impl("unique_dim_consecutive.out", TORCH_FN(functionalization::unique_dim_consecutive_out_out));
  m.impl("_unique2.out", TORCH_FN(functionalization::_unique2_out_out));
  m.impl("_unsafe_view.out", TORCH_FN(functionalization::_unsafe_view_out_out));
  m.impl("var.correction_out", TORCH_FN(functionalization::var_out_correction_out));
  m.impl("var_mean.correction_out", TORCH_FN(functionalization::var_mean_out_correction_out));
  m.impl("where.self_out", TORCH_FN(functionalization::where_out_self_out));
  m.impl("_weight_norm_interface.out", TORCH_FN(functionalization::_weight_norm_interface_out_out));
  m.impl("_weight_norm_interface_backward.out", TORCH_FN(functionalization::_weight_norm_interface_backward_out_out));
  m.impl("zeros.names_out", TORCH_FN(functionalization::zeros_out_names_out));
  m.impl("_efficientzerotensor.out", TORCH_FN(functionalization::_efficientzerotensor_out_out));
  m.impl("zeros.out", TORCH_FN(functionalization::zeros_out_out));
  m.impl("zeros_like.out", TORCH_FN(functionalization::zeros_like_out_out));
  m.impl("_standard_gamma_grad.out", TORCH_FN(functionalization::_standard_gamma_grad_out_out));
  m.impl("_standard_gamma.out", TORCH_FN(functionalization::_standard_gamma_out_out));
  m.impl("_dirichlet_grad.out", TORCH_FN(functionalization::_dirichlet_grad_out_out));
  m.impl("_sample_dirichlet.out", TORCH_FN(functionalization::_sample_dirichlet_out_out));
  m.impl("poisson.out", TORCH_FN(functionalization::poisson_out_out));
  m.impl("binomial.out", TORCH_FN(functionalization::binomial_out_out));
  m.impl("native_norm.out", TORCH_FN(functionalization::native_norm_out_out));
  m.impl("native_norm.ScalarOpt_dim_dtype_out", TORCH_FN(functionalization::native_norm_out_ScalarOpt_dim_dtype_out));
  m.impl("_batch_norm_with_update.out", TORCH_FN(functionalization::_batch_norm_with_update_out_out));
  m.impl("_batch_norm_with_update", TORCH_FN(functionalization::_batch_norm_with_update));
  m.impl("_batch_norm_no_update.out", TORCH_FN(functionalization::_batch_norm_no_update_out_out));
  m.impl("_sparse_sum.dim_out", TORCH_FN(functionalization::_sparse_sum_out_dim_out));
  m.impl("_sparse_sum_backward.out", TORCH_FN(functionalization::_sparse_sum_backward_out_out));
  m.impl("_sparse_csr_sum.dim_dtype_out", TORCH_FN(functionalization::_sparse_csr_sum_out_dim_dtype_out));
  m.impl("_sparse_csr_prod.dim_dtype_out", TORCH_FN(functionalization::_sparse_csr_prod_out_dim_dtype_out));
  m.impl("_sparse_softmax.out", TORCH_FN(functionalization::_sparse_softmax_out_out));
  m.impl("_sparse_softmax_backward_data.out", TORCH_FN(functionalization::_sparse_softmax_backward_data_out_out));
  m.impl("_sparse_log_softmax.out", TORCH_FN(functionalization::_sparse_log_softmax_out_out));
  m.impl("_sparse_log_softmax_backward_data.out", TORCH_FN(functionalization::_sparse_log_softmax_backward_data_out_out));
  m.impl("_spdiags.out", TORCH_FN(functionalization::_spdiags_out_out));
  m.impl("norm.ScalarOpt_dtype_out", TORCH_FN(functionalization::norm_out_ScalarOpt_dtype_out));
  m.impl("norm.Scalar_out", TORCH_FN(functionalization::norm_out_Scalar_out));
  m.impl("norm.dtype_out", TORCH_FN(functionalization::norm_out_dtype_out));
  m.impl("norm.out", TORCH_FN(functionalization::norm_out_out));
  m.impl("frexp.Tensor_out", TORCH_FN(functionalization::frexp_out_Tensor_out));
  m.impl("clone.out", TORCH_FN(functionalization::clone_out_out));
  m.impl("resize_as.out", TORCH_FN(functionalization::resize_as_out_out));
  m.impl("resize_as_", TORCH_FN(functionalization::resize_as_));
  m.impl("resize_as_sparse.out", TORCH_FN(functionalization::resize_as_sparse_out_out));
  m.impl("resize_as_sparse_", TORCH_FN(functionalization::resize_as_sparse_));
  m.impl("zero.out", TORCH_FN(functionalization::zero_out_out));
  m.impl("zero_", TORCH_FN(functionalization::zero_));
  m.impl("sub.out", TORCH_FN(functionalization::sub_out_out));
  m.impl("sub_.Tensor", TORCH_FN(functionalization::sub__Tensor));
  m.impl("sub.Scalar_out", TORCH_FN(functionalization::sub_out_Scalar_out));
  m.impl("sub_.Scalar", TORCH_FN(functionalization::sub__Scalar));
  m.impl("rsub.Tensor_out", TORCH_FN(functionalization::rsub_out_Tensor_out));
  m.impl("heaviside.out", TORCH_FN(functionalization::heaviside_out_out));
  m.impl("heaviside_", TORCH_FN(functionalization::heaviside_));
  m.impl("rsub.Scalar_out", TORCH_FN(functionalization::rsub_out_Scalar_out));
  m.impl("_sparse_addmm.out", TORCH_FN(functionalization::_sparse_addmm_out_out));
  m.impl("sparse_sampled_addmm.out", TORCH_FN(functionalization::sparse_sampled_addmm_out_out));
  m.impl("addmm.out", TORCH_FN(functionalization::addmm_out_out));
  m.impl("addmm_", TORCH_FN(functionalization::addmm_));
  m.impl("_addmm_activation.out", TORCH_FN(functionalization::_addmm_activation_out_out));
  m.impl("_scaled_mm.out", TORCH_FN(functionalization::_scaled_mm_out_out));
  m.impl("sparse_coo_tensor.size_out", TORCH_FN(functionalization::sparse_coo_tensor_out_size_out));
  m.impl("_sparse_coo_tensor_with_dims.out", TORCH_FN(functionalization::_sparse_coo_tensor_with_dims_out_out));
  m.impl("_sparse_coo_tensor_with_dims_and_tensors.out", TORCH_FN(functionalization::_sparse_coo_tensor_with_dims_and_tensors_out_out));
  m.impl("sparse_resize.out", TORCH_FN(functionalization::sparse_resize_out_out));
  m.impl("sparse_resize_", TORCH_FN(functionalization::sparse_resize_));
  m.impl("sparse_resize_and_clear.out", TORCH_FN(functionalization::sparse_resize_and_clear_out_out));
  m.impl("sparse_resize_and_clear_", TORCH_FN(functionalization::sparse_resize_and_clear_));
  m.impl("sparse_mask.out", TORCH_FN(functionalization::sparse_mask_out_out));
  m.impl("_sparse_mask_projection.out", TORCH_FN(functionalization::_sparse_mask_projection_out_out));
  m.impl("_to_dense.out", TORCH_FN(functionalization::_to_dense_out_out));
  m.impl("_coalesce.out", TORCH_FN(functionalization::_coalesce_out_out));
  m.impl("_coalesced.out", TORCH_FN(functionalization::_coalesced_out_out));
  m.impl("_coalesced_", TORCH_FN(functionalization::_coalesced_));
  m.impl("hspmm.out", TORCH_FN(functionalization::hspmm_out_out));
  m.impl("copy_sparse_to_sparse.out", TORCH_FN(functionalization::copy_sparse_to_sparse_out_out));
  m.impl("copy_sparse_to_sparse_", TORCH_FN(functionalization::copy_sparse_to_sparse_));
  m.impl("_to_sparse.sparse_dim_out", TORCH_FN(functionalization::_to_sparse_out_sparse_dim_out));
  m.impl("_to_sparse.out", TORCH_FN(functionalization::_to_sparse_out_out));
  m.impl("_to_sparse_csr.out", TORCH_FN(functionalization::_to_sparse_csr_out_out));
  m.impl("_to_sparse_csc.out", TORCH_FN(functionalization::_to_sparse_csc_out_out));
  m.impl("_to_sparse_bsr.out", TORCH_FN(functionalization::_to_sparse_bsr_out_out));
  m.impl("_to_sparse_bsc.out", TORCH_FN(functionalization::_to_sparse_bsc_out_out));
  m.impl("to_mkldnn.out", TORCH_FN(functionalization::to_mkldnn_out_out));
  m.impl("mkldnn_reorder_conv2d_weight.out", TORCH_FN(functionalization::mkldnn_reorder_conv2d_weight_out_out));
  m.impl("mkldnn_reorder_conv3d_weight.out", TORCH_FN(functionalization::mkldnn_reorder_conv3d_weight_out_out));
  m.impl("quantize_per_tensor_dynamic.out", TORCH_FN(functionalization::quantize_per_tensor_dynamic_out_out));
  m.impl("quantize_per_tensor.out", TORCH_FN(functionalization::quantize_per_tensor_out_out));
  m.impl("quantize_per_tensor.tensor_qparams_out", TORCH_FN(functionalization::quantize_per_tensor_out_tensor_qparams_out));
  m.impl("quantize_per_tensor.tensors_out", TORCH_FN(functionalization::quantize_per_tensor_out_tensors_out));
  m.impl("quantize_per_channel.out", TORCH_FN(functionalization::quantize_per_channel_out_out));
  m.impl("dequantize.self_out", TORCH_FN(functionalization::dequantize_out_self_out));
  m.impl("dequantize.tensors_out", TORCH_FN(functionalization::dequantize_out_tensors_out));
  m.impl("q_per_channel_scales.out", TORCH_FN(functionalization::q_per_channel_scales_out_out));
  m.impl("q_per_channel_zero_points.out", TORCH_FN(functionalization::q_per_channel_zero_points_out_out));
  m.impl("int_repr.out", TORCH_FN(functionalization::int_repr_out_out));
  m.impl("_make_per_tensor_quantized_tensor.out", TORCH_FN(functionalization::_make_per_tensor_quantized_tensor_out_out));
  m.impl("_make_per_channel_quantized_tensor.out", TORCH_FN(functionalization::_make_per_channel_quantized_tensor_out_out));
  m.impl("fake_quantize_per_tensor_affine_cachemask.out", TORCH_FN(functionalization::fake_quantize_per_tensor_affine_cachemask_out_out));
  m.impl("_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.out", TORCH_FN(functionalization::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out_out));
  m.impl("_fake_quantize_learnable_per_tensor_affine.out", TORCH_FN(functionalization::_fake_quantize_learnable_per_tensor_affine_out_out));
  m.impl("fake_quantize_per_channel_affine_cachemask.out", TORCH_FN(functionalization::fake_quantize_per_channel_affine_cachemask_out_out));
  m.impl("_fake_quantize_learnable_per_channel_affine.out", TORCH_FN(functionalization::_fake_quantize_learnable_per_channel_affine_out_out));
  m.impl("_fused_moving_avg_obs_fq_helper.out", TORCH_FN(functionalization::_fused_moving_avg_obs_fq_helper_out_out));
  m.impl("_fused_moving_avg_obs_fq_helper", TORCH_FN(functionalization::_fused_moving_avg_obs_fq_helper));
  m.impl("_to_copy.out", TORCH_FN(functionalization::_to_copy_out_out));
  m.impl("_lstm_mps.out", TORCH_FN(functionalization::_lstm_mps_out_out));
  m.impl("lstm_mps_backward.out", TORCH_FN(functionalization::lstm_mps_backward_out_out));
  m.impl("_thnn_fused_lstm_cell.out", TORCH_FN(functionalization::_thnn_fused_lstm_cell_out_out));
  m.impl("_thnn_fused_lstm_cell_backward_impl.out", TORCH_FN(functionalization::_thnn_fused_lstm_cell_backward_impl_out_out));
  m.impl("_thnn_fused_gru_cell.out", TORCH_FN(functionalization::_thnn_fused_gru_cell_out_out));
  m.impl("_thnn_fused_gru_cell_backward.out", TORCH_FN(functionalization::_thnn_fused_gru_cell_backward_out_out));
  m.impl("_pack_padded_sequence.out", TORCH_FN(functionalization::_pack_padded_sequence_out_out));
  m.impl("set.source_Storage_out", TORCH_FN(functionalization::set_out_source_Storage_out));
  m.impl("set_.source_Storage", TORCH_FN(functionalization::set__source_Storage));
  m.impl("set.source_Storage_storage_offset_out", TORCH_FN(functionalization::set_out_source_Storage_storage_offset_out));
  m.impl("set_.source_Storage_storage_offset", TORCH_FN(functionalization::set__source_Storage_storage_offset));
  m.impl("set.out", TORCH_FN(functionalization::set_out_out));
  m.impl("set_", TORCH_FN(functionalization::set_));
  m.impl("lift_fresh_copy.out", TORCH_FN(functionalization::lift_fresh_copy_out_out));
  m.impl("masked_fill.Scalar_out", TORCH_FN(functionalization::masked_fill_out_Scalar_out));
  m.impl("masked_fill_.Scalar", TORCH_FN(functionalization::masked_fill__Scalar));
  m.impl("masked_fill.Tensor_out", TORCH_FN(functionalization::masked_fill_out_Tensor_out));
  m.impl("masked_fill_.Tensor", TORCH_FN(functionalization::masked_fill__Tensor));
  m.impl("masked_scatter.out", TORCH_FN(functionalization::masked_scatter_out_out));
  m.impl("masked_scatter_", TORCH_FN(functionalization::masked_scatter_));
  m.impl("_masked_softmax.out", TORCH_FN(functionalization::_masked_softmax_out_out));
  m.impl("_masked_softmax_backward.out", TORCH_FN(functionalization::_masked_softmax_backward_out_out));
  m.impl("put.out", TORCH_FN(functionalization::put_out_out));
  m.impl("put_", TORCH_FN(functionalization::put_));
  m.impl("index_add.out", TORCH_FN(functionalization::index_add_out_out));
  m.impl("index_add_", TORCH_FN(functionalization::index_add_));
  m.impl("index_reduce.out", TORCH_FN(functionalization::index_reduce_out_out));
  m.impl("index_reduce_", TORCH_FN(functionalization::index_reduce_));
  m.impl("index_fill.int_Scalar_out", TORCH_FN(functionalization::index_fill_out_int_Scalar_out));
  m.impl("index_fill_.int_Scalar", TORCH_FN(functionalization::index_fill__int_Scalar));
  m.impl("index_fill.int_Tensor_out", TORCH_FN(functionalization::index_fill_out_int_Tensor_out));
  m.impl("index_fill_.int_Tensor", TORCH_FN(functionalization::index_fill__int_Tensor));
  m.impl("scatter.src_out", TORCH_FN(functionalization::scatter_out_src_out));
  m.impl("scatter_.src", TORCH_FN(functionalization::scatter__src));
  m.impl("scatter.value_out", TORCH_FN(functionalization::scatter_out_value_out));
  m.impl("scatter_.value", TORCH_FN(functionalization::scatter__value));
  m.impl("scatter.reduce_out", TORCH_FN(functionalization::scatter_out_reduce_out));
  m.impl("scatter_.reduce", TORCH_FN(functionalization::scatter__reduce));
  m.impl("scatter.value_reduce_out", TORCH_FN(functionalization::scatter_out_value_reduce_out));
  m.impl("scatter_.value_reduce", TORCH_FN(functionalization::scatter__value_reduce));
  m.impl("scatter_add.out", TORCH_FN(functionalization::scatter_add_out_out));
  m.impl("scatter_add_", TORCH_FN(functionalization::scatter_add_));
  m.impl("scatter_reduce.two_out", TORCH_FN(functionalization::scatter_reduce_out_two_out));
  m.impl("scatter_reduce_.two", TORCH_FN(functionalization::scatter_reduce__two));
  m.impl("eq.Scalar_out", TORCH_FN(functionalization::eq_out_Scalar_out));
  m.impl("eq_.Scalar", TORCH_FN(functionalization::eq__Scalar));
  m.impl("eq.Tensor_out", TORCH_FN(functionalization::eq_out_Tensor_out));
  m.impl("eq_.Tensor", TORCH_FN(functionalization::eq__Tensor));
  m.impl("bitwise_and.Tensor_out", TORCH_FN(functionalization::bitwise_and_out_Tensor_out));
  m.impl("bitwise_and_.Tensor", TORCH_FN(functionalization::bitwise_and__Tensor));
  m.impl("bitwise_and.Scalar_out", TORCH_FN(functionalization::bitwise_and_out_Scalar_out));
  m.impl("bitwise_and_.Scalar", TORCH_FN(functionalization::bitwise_and__Scalar));
  m.impl("bitwise_and.Scalar_Tensor_out", TORCH_FN(functionalization::bitwise_and_out_Scalar_Tensor_out));
  m.impl("bitwise_or.Tensor_out", TORCH_FN(functionalization::bitwise_or_out_Tensor_out));
  m.impl("bitwise_or_.Tensor", TORCH_FN(functionalization::bitwise_or__Tensor));
  m.impl("bitwise_or.Scalar_out", TORCH_FN(functionalization::bitwise_or_out_Scalar_out));
  m.impl("bitwise_or_.Scalar", TORCH_FN(functionalization::bitwise_or__Scalar));
  m.impl("bitwise_or.Scalar_Tensor_out", TORCH_FN(functionalization::bitwise_or_out_Scalar_Tensor_out));
  m.impl("bitwise_xor.Tensor_out", TORCH_FN(functionalization::bitwise_xor_out_Tensor_out));
  m.impl("bitwise_xor_.Tensor", TORCH_FN(functionalization::bitwise_xor__Tensor));
  m.impl("bitwise_xor.Scalar_out", TORCH_FN(functionalization::bitwise_xor_out_Scalar_out));
  m.impl("bitwise_xor_.Scalar", TORCH_FN(functionalization::bitwise_xor__Scalar));
  m.impl("bitwise_xor.Scalar_Tensor_out", TORCH_FN(functionalization::bitwise_xor_out_Scalar_Tensor_out));
  m.impl("__lshift__.Scalar_out", TORCH_FN(functionalization::__lshift___out_Scalar_out));
  m.impl("__ilshift__.Scalar", TORCH_FN(functionalization::__ilshift___Scalar));
  m.impl("__lshift__.Tensor_out", TORCH_FN(functionalization::__lshift___out_Tensor_out));
  m.impl("__ilshift__.Tensor", TORCH_FN(functionalization::__ilshift___Tensor));
  m.impl("bitwise_left_shift.Tensor_out", TORCH_FN(functionalization::bitwise_left_shift_out_Tensor_out));
  m.impl("bitwise_left_shift_.Tensor", TORCH_FN(functionalization::bitwise_left_shift__Tensor));
  m.impl("bitwise_left_shift.Tensor_Scalar_out", TORCH_FN(functionalization::bitwise_left_shift_out_Tensor_Scalar_out));
  m.impl("bitwise_left_shift_.Tensor_Scalar", TORCH_FN(functionalization::bitwise_left_shift__Tensor_Scalar));
  m.impl("bitwise_left_shift.Scalar_Tensor_out", TORCH_FN(functionalization::bitwise_left_shift_out_Scalar_Tensor_out));
  m.impl("__rshift__.Scalar_out", TORCH_FN(functionalization::__rshift___out_Scalar_out));
  m.impl("__irshift__.Scalar", TORCH_FN(functionalization::__irshift___Scalar));
  m.impl("__rshift__.Tensor_out", TORCH_FN(functionalization::__rshift___out_Tensor_out));
  m.impl("__irshift__.Tensor", TORCH_FN(functionalization::__irshift___Tensor));
  m.impl("bitwise_right_shift.Tensor_out", TORCH_FN(functionalization::bitwise_right_shift_out_Tensor_out));
  m.impl("bitwise_right_shift_.Tensor", TORCH_FN(functionalization::bitwise_right_shift__Tensor));
  m.impl("bitwise_right_shift.Tensor_Scalar_out", TORCH_FN(functionalization::bitwise_right_shift_out_Tensor_Scalar_out));
  m.impl("bitwise_right_shift_.Tensor_Scalar", TORCH_FN(functionalization::bitwise_right_shift__Tensor_Scalar));
  m.impl("bitwise_right_shift.Scalar_Tensor_out", TORCH_FN(functionalization::bitwise_right_shift_out_Scalar_Tensor_out));
  m.impl("tril.out", TORCH_FN(functionalization::tril_out_out));
  m.impl("tril_", TORCH_FN(functionalization::tril_));
  m.impl("triu.out", TORCH_FN(functionalization::triu_out_out));
  m.impl("triu_", TORCH_FN(functionalization::triu_));
  m.impl("digamma.out", TORCH_FN(functionalization::digamma_out_out));
  m.impl("digamma_", TORCH_FN(functionalization::digamma_));
  m.impl("lerp.Scalar_out", TORCH_FN(functionalization::lerp_out_Scalar_out));
  m.impl("lerp_.Scalar", TORCH_FN(functionalization::lerp__Scalar));
  m.impl("lerp.Tensor_out", TORCH_FN(functionalization::lerp_out_Tensor_out));
  m.impl("lerp_.Tensor", TORCH_FN(functionalization::lerp__Tensor));
  m.impl("addbmm.out", TORCH_FN(functionalization::addbmm_out_out));
  m.impl("addbmm_", TORCH_FN(functionalization::addbmm_));
  m.impl("random.from_out", TORCH_FN(functionalization::random_out_from_out));
  m.impl("random_.from", TORCH_FN(functionalization::random__from));
  m.impl("random.to_out", TORCH_FN(functionalization::random_out_to_out));
  m.impl("random_.to", TORCH_FN(functionalization::random__to));
  m.impl("random.out", TORCH_FN(functionalization::random_out_out));
  m.impl("random_", TORCH_FN(functionalization::random_));
  m.impl("uniform.out", TORCH_FN(functionalization::uniform_out_out));
  m.impl("uniform_", TORCH_FN(functionalization::uniform_));
  m.impl("cauchy.out", TORCH_FN(functionalization::cauchy_out_out));
  m.impl("cauchy_", TORCH_FN(functionalization::cauchy_));
  m.impl("log_normal.out", TORCH_FN(functionalization::log_normal_out_out));
  m.impl("log_normal_", TORCH_FN(functionalization::log_normal_));
  m.impl("exponential.out", TORCH_FN(functionalization::exponential_out_out));
  m.impl("exponential_", TORCH_FN(functionalization::exponential_));
  m.impl("geometric.out", TORCH_FN(functionalization::geometric_out_out));
  m.impl("geometric_", TORCH_FN(functionalization::geometric_));
  m.impl("tril_indices.out", TORCH_FN(functionalization::tril_indices_out_out));
  m.impl("triu_indices.out", TORCH_FN(functionalization::triu_indices_out_out));
  m.impl("trace.out", TORCH_FN(functionalization::trace_out_out));
  m.impl("ne.Scalar_out", TORCH_FN(functionalization::ne_out_Scalar_out));
  m.impl("ne_.Scalar", TORCH_FN(functionalization::ne__Scalar));
  m.impl("ne.Tensor_out", TORCH_FN(functionalization::ne_out_Tensor_out));
  m.impl("ne_.Tensor", TORCH_FN(functionalization::ne__Tensor));
  m.impl("ge.Scalar_out", TORCH_FN(functionalization::ge_out_Scalar_out));
  m.impl("ge_.Scalar", TORCH_FN(functionalization::ge__Scalar));
  m.impl("ge.Tensor_out", TORCH_FN(functionalization::ge_out_Tensor_out));
  m.impl("ge_.Tensor", TORCH_FN(functionalization::ge__Tensor));
  m.impl("le.Scalar_out", TORCH_FN(functionalization::le_out_Scalar_out));
  m.impl("le_.Scalar", TORCH_FN(functionalization::le__Scalar));
  m.impl("le.Tensor_out", TORCH_FN(functionalization::le_out_Tensor_out));
  m.impl("le_.Tensor", TORCH_FN(functionalization::le__Tensor));
  m.impl("gt.Scalar_out", TORCH_FN(functionalization::gt_out_Scalar_out));
  m.impl("gt_.Scalar", TORCH_FN(functionalization::gt__Scalar));
  m.impl("gt.Tensor_out", TORCH_FN(functionalization::gt_out_Tensor_out));
  m.impl("gt_.Tensor", TORCH_FN(functionalization::gt__Tensor));
  m.impl("lt.Scalar_out", TORCH_FN(functionalization::lt_out_Scalar_out));
  m.impl("lt_.Scalar", TORCH_FN(functionalization::lt__Scalar));
  m.impl("lt.Tensor_out", TORCH_FN(functionalization::lt_out_Tensor_out));
  m.impl("lt_.Tensor", TORCH_FN(functionalization::lt__Tensor));
  m.impl("take.out", TORCH_FN(functionalization::take_out_out));
  m.impl("index_select.out", TORCH_FN(functionalization::index_select_out_out));
  m.impl("masked_select.out", TORCH_FN(functionalization::masked_select_out_out));
  m.impl("nonzero.out", TORCH_FN(functionalization::nonzero_out_out));
  m.impl("nonzero_static.out", TORCH_FN(functionalization::nonzero_static_out_out));
  m.impl("gather.out", TORCH_FN(functionalization::gather_out_out));
  m.impl("addcmul.out", TORCH_FN(functionalization::addcmul_out_out));
  m.impl("addcmul_", TORCH_FN(functionalization::addcmul_));
  m.impl("addcdiv.out", TORCH_FN(functionalization::addcdiv_out_out));
  m.impl("addcdiv_", TORCH_FN(functionalization::addcdiv_));
  m.impl("triangular_solve.X", TORCH_FN(functionalization::triangular_solve_out_X));
  m.impl("linalg_solve_triangular.out", TORCH_FN(functionalization::linalg_solve_triangular_out_out));
  m.impl("cholesky.out", TORCH_FN(functionalization::cholesky_out_out));
  m.impl("cholesky_solve.out", TORCH_FN(functionalization::cholesky_solve_out_out));
  m.impl("_cholesky_solve_helper.out", TORCH_FN(functionalization::_cholesky_solve_helper_out_out));
  m.impl("cholesky_inverse.out", TORCH_FN(functionalization::cholesky_inverse_out_out));
  m.impl("geqrf.a", TORCH_FN(functionalization::geqrf_out_a));
  m.impl("ormqr.out", TORCH_FN(functionalization::ormqr_out_out));
  m.impl("lu_unpack.out", TORCH_FN(functionalization::lu_unpack_out_out));
  m.impl("multinomial.out", TORCH_FN(functionalization::multinomial_out_out));
  m.impl("lgamma.out", TORCH_FN(functionalization::lgamma_out_out));
  m.impl("lgamma_", TORCH_FN(functionalization::lgamma_));
  m.impl("polygamma.out", TORCH_FN(functionalization::polygamma_out_out));
  m.impl("erfinv.out", TORCH_FN(functionalization::erfinv_out_out));
  m.impl("erfinv_", TORCH_FN(functionalization::erfinv_));
  m.impl("i0.out", TORCH_FN(functionalization::i0_out_out));
  m.impl("i0_", TORCH_FN(functionalization::i0_));
  m.impl("sign.out", TORCH_FN(functionalization::sign_out_out));
  m.impl("sign_", TORCH_FN(functionalization::sign_));
  m.impl("signbit.out", TORCH_FN(functionalization::signbit_out_out));
  m.impl("dist.out", TORCH_FN(functionalization::dist_out_out));
  m.impl("atan2.out", TORCH_FN(functionalization::atan2_out_out));
  m.impl("atan2_", TORCH_FN(functionalization::atan2_));
  m.impl("histc.out", TORCH_FN(functionalization::histc_out_out));
  m.impl("histogram.bins_tensor_out", TORCH_FN(functionalization::histogram_out_bins_tensor_out));
  m.impl("histogram.bin_ct_out", TORCH_FN(functionalization::histogram_out_bin_ct_out));
  m.impl("_histogramdd_bin_edges.out", TORCH_FN(functionalization::_histogramdd_bin_edges_out_out));
  m.impl("_histogramdd_from_bin_cts.out", TORCH_FN(functionalization::_histogramdd_from_bin_cts_out_out));
  m.impl("_histogramdd_from_bin_tensors.out", TORCH_FN(functionalization::_histogramdd_from_bin_tensors_out_out));
  m.impl("fmod.Scalar_out", TORCH_FN(functionalization::fmod_out_Scalar_out));
  m.impl("fmod_.Scalar", TORCH_FN(functionalization::fmod__Scalar));
  m.impl("fmod.Tensor_out", TORCH_FN(functionalization::fmod_out_Tensor_out));
  m.impl("fmod_.Tensor", TORCH_FN(functionalization::fmod__Tensor));
  m.impl("hypot.out", TORCH_FN(functionalization::hypot_out_out));
  m.impl("hypot_", TORCH_FN(functionalization::hypot_));
  m.impl("igamma.out", TORCH_FN(functionalization::igamma_out_out));
  m.impl("igamma_", TORCH_FN(functionalization::igamma_));
  m.impl("igammac.out", TORCH_FN(functionalization::igammac_out_out));
  m.impl("igammac_", TORCH_FN(functionalization::igammac_));
  m.impl("nextafter.out", TORCH_FN(functionalization::nextafter_out_out));
  m.impl("nextafter_", TORCH_FN(functionalization::nextafter_));
  m.impl("remainder.Scalar_out", TORCH_FN(functionalization::remainder_out_Scalar_out));
  m.impl("remainder_.Scalar", TORCH_FN(functionalization::remainder__Scalar));
  m.impl("remainder.Tensor_out", TORCH_FN(functionalization::remainder_out_Tensor_out));
  m.impl("remainder_.Tensor", TORCH_FN(functionalization::remainder__Tensor));
  m.impl("remainder.Scalar_Tensor_out", TORCH_FN(functionalization::remainder_out_Scalar_Tensor_out));
  m.impl("min.unary_out", TORCH_FN(functionalization::min_out_unary_out));
  m.impl("fmin.out", TORCH_FN(functionalization::fmin_out_out));
  m.impl("max.unary_out", TORCH_FN(functionalization::max_out_unary_out));
  m.impl("fmax.out", TORCH_FN(functionalization::fmax_out_out));
  m.impl("maximum.out", TORCH_FN(functionalization::maximum_out_out));
  m.impl("minimum.out", TORCH_FN(functionalization::minimum_out_out));
  m.impl("sort.values", TORCH_FN(functionalization::sort_out_values));
  m.impl("sort.values_stable", TORCH_FN(functionalization::sort_out_values_stable));
  m.impl("topk.values", TORCH_FN(functionalization::topk_out_values));
  m.impl("all.all_out", TORCH_FN(functionalization::all_out_all_out));
  m.impl("any.all_out", TORCH_FN(functionalization::any_out_all_out));
  m.impl("renorm.out", TORCH_FN(functionalization::renorm_out_out));
  m.impl("renorm_", TORCH_FN(functionalization::renorm_));
  m.impl("unfold_backward.out", TORCH_FN(functionalization::unfold_backward_out_out));
  m.impl("pow.Tensor_Tensor_out", TORCH_FN(functionalization::pow_out_Tensor_Tensor_out));
  m.impl("pow_.Tensor", TORCH_FN(functionalization::pow__Tensor));
  m.impl("pow.Scalar_out", TORCH_FN(functionalization::pow_out_Scalar_out));
  m.impl("pow.Tensor_Scalar_out", TORCH_FN(functionalization::pow_out_Tensor_Scalar_out));
  m.impl("pow_.Scalar", TORCH_FN(functionalization::pow__Scalar));
  m.impl("normal.out", TORCH_FN(functionalization::normal_out_out));
  m.impl("normal_", TORCH_FN(functionalization::normal_));
  m.impl("normal.Tensor_float_out", TORCH_FN(functionalization::normal_out_Tensor_float_out));
  m.impl("normal.float_Tensor_out", TORCH_FN(functionalization::normal_out_float_Tensor_out));
  m.impl("normal.Tensor_Tensor_out", TORCH_FN(functionalization::normal_out_Tensor_Tensor_out));
  m.impl("normal.float_float_out", TORCH_FN(functionalization::normal_out_float_float_out));
  m.impl("_amp_foreach_non_finite_check_and_unscale.out", TORCH_FN(functionalization::_amp_foreach_non_finite_check_and_unscale_out_out));
  m.impl("_amp_foreach_non_finite_check_and_unscale_", TORCH_FN(functionalization::_amp_foreach_non_finite_check_and_unscale_));
  m.impl("_amp_update_scale.out", TORCH_FN(functionalization::_amp_update_scale_out_out));
  m.impl("_amp_update_scale_", TORCH_FN(functionalization::_amp_update_scale_));
  m.impl("_foreach_add.Scalar_out", TORCH_FN(functionalization::_foreach_add_out_Scalar_out));
  m.impl("_foreach_add_.Scalar", TORCH_FN(functionalization::_foreach_add__Scalar));
  m.impl("_foreach_add.List_out", TORCH_FN(functionalization::_foreach_add_out_List_out));
  m.impl("_foreach_add_.List", TORCH_FN(functionalization::_foreach_add__List));
  m.impl("_foreach_add.ScalarList_out", TORCH_FN(functionalization::_foreach_add_out_ScalarList_out));
  m.impl("_foreach_add_.ScalarList", TORCH_FN(functionalization::_foreach_add__ScalarList));
  m.impl("_foreach_add.Tensor_out", TORCH_FN(functionalization::_foreach_add_out_Tensor_out));
  m.impl("_foreach_add_.Tensor", TORCH_FN(functionalization::_foreach_add__Tensor));
  m.impl("_foreach_sub.Scalar_out", TORCH_FN(functionalization::_foreach_sub_out_Scalar_out));
  m.impl("_foreach_sub_.Scalar", TORCH_FN(functionalization::_foreach_sub__Scalar));
  m.impl("_foreach_sub.List_out", TORCH_FN(functionalization::_foreach_sub_out_List_out));
  m.impl("_foreach_sub_.List", TORCH_FN(functionalization::_foreach_sub__List));
  m.impl("_foreach_sub.ScalarList_out", TORCH_FN(functionalization::_foreach_sub_out_ScalarList_out));
  m.impl("_foreach_sub_.ScalarList", TORCH_FN(functionalization::_foreach_sub__ScalarList));
  m.impl("_foreach_mul.Scalar_out", TORCH_FN(functionalization::_foreach_mul_out_Scalar_out));
  m.impl("_foreach_mul_.Scalar", TORCH_FN(functionalization::_foreach_mul__Scalar));
  m.impl("_foreach_mul.List_out", TORCH_FN(functionalization::_foreach_mul_out_List_out));
  m.impl("_foreach_mul_.List", TORCH_FN(functionalization::_foreach_mul__List));
  m.impl("_foreach_mul.ScalarList_out", TORCH_FN(functionalization::_foreach_mul_out_ScalarList_out));
  m.impl("_foreach_mul_.ScalarList", TORCH_FN(functionalization::_foreach_mul__ScalarList));
  m.impl("_foreach_mul.Tensor_out", TORCH_FN(functionalization::_foreach_mul_out_Tensor_out));
  m.impl("_foreach_mul_.Tensor", TORCH_FN(functionalization::_foreach_mul__Tensor));
  m.impl("_foreach_div.Scalar_out", TORCH_FN(functionalization::_foreach_div_out_Scalar_out));
  m.impl("_foreach_div_.Scalar", TORCH_FN(functionalization::_foreach_div__Scalar));
  m.impl("_foreach_div.List_out", TORCH_FN(functionalization::_foreach_div_out_List_out));
  m.impl("_foreach_div_.List", TORCH_FN(functionalization::_foreach_div__List));
  m.impl("_foreach_div.ScalarList_out", TORCH_FN(functionalization::_foreach_div_out_ScalarList_out));
  m.impl("_foreach_div_.ScalarList", TORCH_FN(functionalization::_foreach_div__ScalarList));
  m.impl("_foreach_div.Tensor_out", TORCH_FN(functionalization::_foreach_div_out_Tensor_out));
  m.impl("_foreach_div_.Tensor", TORCH_FN(functionalization::_foreach_div__Tensor));
  m.impl("_foreach_clamp_max.Scalar_out", TORCH_FN(functionalization::_foreach_clamp_max_out_Scalar_out));
  m.impl("_foreach_clamp_max_.Scalar", TORCH_FN(functionalization::_foreach_clamp_max__Scalar));
  m.impl("_foreach_clamp_max.List_out", TORCH_FN(functionalization::_foreach_clamp_max_out_List_out));
  m.impl("_foreach_clamp_max_.List", TORCH_FN(functionalization::_foreach_clamp_max__List));
  m.impl("_foreach_clamp_max.ScalarList_out", TORCH_FN(functionalization::_foreach_clamp_max_out_ScalarList_out));
  m.impl("_foreach_clamp_max_.ScalarList", TORCH_FN(functionalization::_foreach_clamp_max__ScalarList));
  m.impl("_foreach_clamp_min.Scalar_out", TORCH_FN(functionalization::_foreach_clamp_min_out_Scalar_out));
  m.impl("_foreach_clamp_min_.Scalar", TORCH_FN(functionalization::_foreach_clamp_min__Scalar));
  m.impl("_foreach_clamp_min.List_out", TORCH_FN(functionalization::_foreach_clamp_min_out_List_out));
  m.impl("_foreach_clamp_min_.List", TORCH_FN(functionalization::_foreach_clamp_min__List));
  m.impl("_foreach_clamp_min.ScalarList_out", TORCH_FN(functionalization::_foreach_clamp_min_out_ScalarList_out));
  m.impl("_foreach_clamp_min_.ScalarList", TORCH_FN(functionalization::_foreach_clamp_min__ScalarList));
  m.impl("_foreach_maximum.Scalar_out", TORCH_FN(functionalization::_foreach_maximum_out_Scalar_out));
  m.impl("_foreach_maximum_.Scalar", TORCH_FN(functionalization::_foreach_maximum__Scalar));
  m.impl("_foreach_maximum.List_out", TORCH_FN(functionalization::_foreach_maximum_out_List_out));
  m.impl("_foreach_maximum_.List", TORCH_FN(functionalization::_foreach_maximum__List));
  m.impl("_foreach_maximum.ScalarList_out", TORCH_FN(functionalization::_foreach_maximum_out_ScalarList_out));
  m.impl("_foreach_maximum_.ScalarList", TORCH_FN(functionalization::_foreach_maximum__ScalarList));
  m.impl("_foreach_minimum.Scalar_out", TORCH_FN(functionalization::_foreach_minimum_out_Scalar_out));
  m.impl("_foreach_minimum_.Scalar", TORCH_FN(functionalization::_foreach_minimum__Scalar));
  m.impl("_foreach_minimum.List_out", TORCH_FN(functionalization::_foreach_minimum_out_List_out));
  m.impl("_foreach_minimum_.List", TORCH_FN(functionalization::_foreach_minimum__List));
  m.impl("_foreach_minimum.ScalarList_out", TORCH_FN(functionalization::_foreach_minimum_out_ScalarList_out));
  m.impl("_foreach_minimum_.ScalarList", TORCH_FN(functionalization::_foreach_minimum__ScalarList));
  m.impl("_foreach_addcdiv.Scalar_out", TORCH_FN(functionalization::_foreach_addcdiv_out_Scalar_out));
  m.impl("_foreach_addcdiv_.Scalar", TORCH_FN(functionalization::_foreach_addcdiv__Scalar));
  m.impl("_foreach_addcdiv.ScalarList_out", TORCH_FN(functionalization::_foreach_addcdiv_out_ScalarList_out));
  m.impl("_foreach_addcdiv_.ScalarList", TORCH_FN(functionalization::_foreach_addcdiv__ScalarList));
  m.impl("_foreach_addcdiv.Tensor_out", TORCH_FN(functionalization::_foreach_addcdiv_out_Tensor_out));
  m.impl("_foreach_addcdiv_.Tensor", TORCH_FN(functionalization::_foreach_addcdiv__Tensor));
  m.impl("_foreach_addcmul.Scalar_out", TORCH_FN(functionalization::_foreach_addcmul_out_Scalar_out));
  m.impl("_foreach_addcmul_.Scalar", TORCH_FN(functionalization::_foreach_addcmul__Scalar));
  m.impl("_foreach_addcmul.ScalarList_out", TORCH_FN(functionalization::_foreach_addcmul_out_ScalarList_out));
  m.impl("_foreach_addcmul_.ScalarList", TORCH_FN(functionalization::_foreach_addcmul__ScalarList));
  m.impl("_foreach_addcmul.Tensor_out", TORCH_FN(functionalization::_foreach_addcmul_out_Tensor_out));
  m.impl("_foreach_addcmul_.Tensor", TORCH_FN(functionalization::_foreach_addcmul__Tensor));
  m.impl("_foreach_abs.out", TORCH_FN(functionalization::_foreach_abs_out_out));
  m.impl("_foreach_abs_", TORCH_FN(functionalization::_foreach_abs_));
  m.impl("_foreach_acos.out", TORCH_FN(functionalization::_foreach_acos_out_out));
  m.impl("_foreach_acos_", TORCH_FN(functionalization::_foreach_acos_));
  m.impl("_foreach_asin.out", TORCH_FN(functionalization::_foreach_asin_out_out));
  m.impl("_foreach_asin_", TORCH_FN(functionalization::_foreach_asin_));
  m.impl("_foreach_atan.out", TORCH_FN(functionalization::_foreach_atan_out_out));
  m.impl("_foreach_atan_", TORCH_FN(functionalization::_foreach_atan_));
  m.impl("_foreach_ceil.out", TORCH_FN(functionalization::_foreach_ceil_out_out));
  m.impl("_foreach_ceil_", TORCH_FN(functionalization::_foreach_ceil_));
  m.impl("_foreach_cos.out", TORCH_FN(functionalization::_foreach_cos_out_out));
  m.impl("_foreach_cos_", TORCH_FN(functionalization::_foreach_cos_));
  m.impl("_foreach_cosh.out", TORCH_FN(functionalization::_foreach_cosh_out_out));
  m.impl("_foreach_cosh_", TORCH_FN(functionalization::_foreach_cosh_));
  m.impl("_foreach_erf.out", TORCH_FN(functionalization::_foreach_erf_out_out));
  m.impl("_foreach_erf_", TORCH_FN(functionalization::_foreach_erf_));
  m.impl("_foreach_erfc.out", TORCH_FN(functionalization::_foreach_erfc_out_out));
  m.impl("_foreach_erfc_", TORCH_FN(functionalization::_foreach_erfc_));
  m.impl("_foreach_exp.out", TORCH_FN(functionalization::_foreach_exp_out_out));
  m.impl("_foreach_exp_", TORCH_FN(functionalization::_foreach_exp_));
  m.impl("_foreach_expm1.out", TORCH_FN(functionalization::_foreach_expm1_out_out));
  m.impl("_foreach_expm1_", TORCH_FN(functionalization::_foreach_expm1_));
  m.impl("_foreach_floor.out", TORCH_FN(functionalization::_foreach_floor_out_out));
  m.impl("_foreach_floor_", TORCH_FN(functionalization::_foreach_floor_));
  m.impl("_foreach_frac.out", TORCH_FN(functionalization::_foreach_frac_out_out));
  m.impl("_foreach_frac_", TORCH_FN(functionalization::_foreach_frac_));
  m.impl("_foreach_lerp.List_out", TORCH_FN(functionalization::_foreach_lerp_out_List_out));
  m.impl("_foreach_lerp_.List", TORCH_FN(functionalization::_foreach_lerp__List));
  m.impl("_foreach_lerp.Scalar_out", TORCH_FN(functionalization::_foreach_lerp_out_Scalar_out));
  m.impl("_foreach_lerp_.Scalar", TORCH_FN(functionalization::_foreach_lerp__Scalar));
  m.impl("_foreach_lerp.ScalarList_out", TORCH_FN(functionalization::_foreach_lerp_out_ScalarList_out));
  m.impl("_foreach_lerp_.ScalarList", TORCH_FN(functionalization::_foreach_lerp__ScalarList));
  m.impl("_foreach_lgamma.out", TORCH_FN(functionalization::_foreach_lgamma_out_out));
  m.impl("_foreach_lgamma_", TORCH_FN(functionalization::_foreach_lgamma_));
  m.impl("_foreach_log.out", TORCH_FN(functionalization::_foreach_log_out_out));
  m.impl("_foreach_log_", TORCH_FN(functionalization::_foreach_log_));
  m.impl("_foreach_log10.out", TORCH_FN(functionalization::_foreach_log10_out_out));
  m.impl("_foreach_log10_", TORCH_FN(functionalization::_foreach_log10_));
  m.impl("_foreach_log1p.out", TORCH_FN(functionalization::_foreach_log1p_out_out));
  m.impl("_foreach_log1p_", TORCH_FN(functionalization::_foreach_log1p_));
  m.impl("_foreach_log2.out", TORCH_FN(functionalization::_foreach_log2_out_out));
  m.impl("_foreach_log2_", TORCH_FN(functionalization::_foreach_log2_));
  m.impl("_foreach_max.out", TORCH_FN(functionalization::_foreach_max_out_out));
  m.impl("_foreach_neg.out", TORCH_FN(functionalization::_foreach_neg_out_out));
  m.impl("_foreach_neg_", TORCH_FN(functionalization::_foreach_neg_));
  m.impl("_foreach_norm.Scalar_out", TORCH_FN(functionalization::_foreach_norm_out_Scalar_out));
  m.impl("_foreach_pow.List_out", TORCH_FN(functionalization::_foreach_pow_out_List_out));
  m.impl("_foreach_pow_.List", TORCH_FN(functionalization::_foreach_pow__List));
  m.impl("_foreach_pow.Scalar_out", TORCH_FN(functionalization::_foreach_pow_out_Scalar_out));
  m.impl("_foreach_pow_.Scalar", TORCH_FN(functionalization::_foreach_pow__Scalar));
  m.impl("_foreach_pow.ScalarList_out", TORCH_FN(functionalization::_foreach_pow_out_ScalarList_out));
  m.impl("_foreach_pow_.ScalarList", TORCH_FN(functionalization::_foreach_pow__ScalarList));
  m.impl("_foreach_reciprocal.out", TORCH_FN(functionalization::_foreach_reciprocal_out_out));
  m.impl("_foreach_reciprocal_", TORCH_FN(functionalization::_foreach_reciprocal_));
  m.impl("_foreach_round.out", TORCH_FN(functionalization::_foreach_round_out_out));
  m.impl("_foreach_round_", TORCH_FN(functionalization::_foreach_round_));
  m.impl("_foreach_rsqrt.out", TORCH_FN(functionalization::_foreach_rsqrt_out_out));
  m.impl("_foreach_rsqrt_", TORCH_FN(functionalization::_foreach_rsqrt_));
  m.impl("_foreach_sigmoid.out", TORCH_FN(functionalization::_foreach_sigmoid_out_out));
  m.impl("_foreach_sigmoid_", TORCH_FN(functionalization::_foreach_sigmoid_));
  m.impl("_foreach_sign.out", TORCH_FN(functionalization::_foreach_sign_out_out));
  m.impl("_foreach_sign_", TORCH_FN(functionalization::_foreach_sign_));
  m.impl("_foreach_sin.out", TORCH_FN(functionalization::_foreach_sin_out_out));
  m.impl("_foreach_sin_", TORCH_FN(functionalization::_foreach_sin_));
  m.impl("_foreach_sinh.out", TORCH_FN(functionalization::_foreach_sinh_out_out));
  m.impl("_foreach_sinh_", TORCH_FN(functionalization::_foreach_sinh_));
  m.impl("_foreach_sqrt.out", TORCH_FN(functionalization::_foreach_sqrt_out_out));
  m.impl("_foreach_sqrt_", TORCH_FN(functionalization::_foreach_sqrt_));
  m.impl("_foreach_tan.out", TORCH_FN(functionalization::_foreach_tan_out_out));
  m.impl("_foreach_tan_", TORCH_FN(functionalization::_foreach_tan_));
  m.impl("_foreach_tanh.out", TORCH_FN(functionalization::_foreach_tanh_out_out));
  m.impl("_foreach_tanh_", TORCH_FN(functionalization::_foreach_tanh_));
  m.impl("_foreach_trunc.out", TORCH_FN(functionalization::_foreach_trunc_out_out));
  m.impl("_foreach_trunc_", TORCH_FN(functionalization::_foreach_trunc_));
  m.impl("_foreach_zero.out", TORCH_FN(functionalization::_foreach_zero_out_out));
  m.impl("_foreach_zero_", TORCH_FN(functionalization::_foreach_zero_));
  m.impl("_foreach_copy.out", TORCH_FN(functionalization::_foreach_copy_out_out));
  m.impl("_foreach_copy_", TORCH_FN(functionalization::_foreach_copy_));
  m.impl("bucketize.Tensor_out", TORCH_FN(functionalization::bucketize_out_Tensor_out));
  m.impl("bucketize.Scalar_out", TORCH_FN(functionalization::bucketize_out_Scalar_out));
  m.impl("searchsorted.Tensor_out", TORCH_FN(functionalization::searchsorted_out_Tensor_out));
  m.impl("searchsorted.Scalar_out", TORCH_FN(functionalization::searchsorted_out_Scalar_out));
  m.impl("_convert_indices_from_coo_to_csr.out", TORCH_FN(functionalization::_convert_indices_from_coo_to_csr_out_out));
  m.impl("_convert_indices_from_csr_to_coo.out", TORCH_FN(functionalization::_convert_indices_from_csr_to_coo_out_out));
  m.impl("mse_loss.out", TORCH_FN(functionalization::mse_loss_out_out));
  m.impl("mse_loss_backward.grad_input", TORCH_FN(functionalization::mse_loss_backward_out_grad_input));
  m.impl("multi_margin_loss.out", TORCH_FN(functionalization::multi_margin_loss_out_out));
  m.impl("multi_margin_loss_backward.grad_input", TORCH_FN(functionalization::multi_margin_loss_backward_out_grad_input));
  m.impl("multilabel_margin_loss_forward.output", TORCH_FN(functionalization::multilabel_margin_loss_forward_out_output));
  m.impl("multilabel_margin_loss_backward.grad_input", TORCH_FN(functionalization::multilabel_margin_loss_backward_out_grad_input));
  m.impl("nll_loss_forward.output", TORCH_FN(functionalization::nll_loss_forward_out_output));
  m.impl("nll_loss_backward.grad_input", TORCH_FN(functionalization::nll_loss_backward_out_grad_input));
  m.impl("nll_loss2d_forward.output", TORCH_FN(functionalization::nll_loss2d_forward_out_output));
  m.impl("nll_loss2d_backward.grad_input", TORCH_FN(functionalization::nll_loss2d_backward_out_grad_input));
  m.impl("smooth_l1_loss.out", TORCH_FN(functionalization::smooth_l1_loss_out_out));
  m.impl("smooth_l1_loss_backward.grad_input", TORCH_FN(functionalization::smooth_l1_loss_backward_out_grad_input));
  m.impl("huber_loss.out", TORCH_FN(functionalization::huber_loss_out_out));
  m.impl("huber_loss_backward.out", TORCH_FN(functionalization::huber_loss_backward_out_out));
  m.impl("soft_margin_loss.out", TORCH_FN(functionalization::soft_margin_loss_out_out));
  m.impl("soft_margin_loss_backward.grad_input", TORCH_FN(functionalization::soft_margin_loss_backward_out_grad_input));
  m.impl("elu.out", TORCH_FN(functionalization::elu_out_out));
  m.impl("elu_", TORCH_FN(functionalization::elu_));
  m.impl("elu_backward.grad_input", TORCH_FN(functionalization::elu_backward_out_grad_input));
  m.impl("glu.out", TORCH_FN(functionalization::glu_out_out));
  m.impl("glu_backward.grad_input", TORCH_FN(functionalization::glu_backward_out_grad_input));
  m.impl("glu_jvp.out", TORCH_FN(functionalization::glu_jvp_out_out));
  m.impl("glu_backward_jvp.out", TORCH_FN(functionalization::glu_backward_jvp_out_out));
  m.impl("hardsigmoid.out", TORCH_FN(functionalization::hardsigmoid_out_out));
  m.impl("hardsigmoid_", TORCH_FN(functionalization::hardsigmoid_));
  m.impl("hardsigmoid_backward.grad_input", TORCH_FN(functionalization::hardsigmoid_backward_out_grad_input));
  m.impl("hardtanh.out", TORCH_FN(functionalization::hardtanh_out_out));
  m.impl("hardtanh_", TORCH_FN(functionalization::hardtanh_));
  m.impl("hardtanh_backward.grad_input", TORCH_FN(functionalization::hardtanh_backward_out_grad_input));
  m.impl("hardswish.out", TORCH_FN(functionalization::hardswish_out_out));
  m.impl("hardswish_", TORCH_FN(functionalization::hardswish_));
  m.impl("hardswish_backward.out", TORCH_FN(functionalization::hardswish_backward_out_out));
  m.impl("leaky_relu.out", TORCH_FN(functionalization::leaky_relu_out_out));
  m.impl("leaky_relu_", TORCH_FN(functionalization::leaky_relu_));
  m.impl("leaky_relu_backward.grad_input", TORCH_FN(functionalization::leaky_relu_backward_out_grad_input));
  m.impl("log_sigmoid_forward.output", TORCH_FN(functionalization::log_sigmoid_forward_out_output));
  m.impl("log_sigmoid_backward.grad_input", TORCH_FN(functionalization::log_sigmoid_backward_out_grad_input));
  m.impl("rrelu_with_noise.out", TORCH_FN(functionalization::rrelu_with_noise_out_out));
  m.impl("rrelu_with_noise_", TORCH_FN(functionalization::rrelu_with_noise_));
  m.impl("rrelu_with_noise", TORCH_FN(functionalization::rrelu_with_noise));
  m.impl("rrelu_with_noise_backward.out", TORCH_FN(functionalization::rrelu_with_noise_backward_out_out));
  m.impl("softplus.out", TORCH_FN(functionalization::softplus_out_out));
  m.impl("softplus_backward.grad_input", TORCH_FN(functionalization::softplus_backward_out_grad_input));
  m.impl("softshrink.out", TORCH_FN(functionalization::softshrink_out_out));
  m.impl("softshrink_backward.grad_input", TORCH_FN(functionalization::softshrink_backward_out_grad_input));
  m.impl("adaptive_avg_pool2d.out", TORCH_FN(functionalization::adaptive_avg_pool2d_out_out));
  m.impl("mkldnn_adaptive_avg_pool2d.out", TORCH_FN(functionalization::mkldnn_adaptive_avg_pool2d_out_out));
  m.impl("mkldnn_adaptive_avg_pool2d_backward.out", TORCH_FN(functionalization::mkldnn_adaptive_avg_pool2d_backward_out_out));
  m.impl("_adaptive_avg_pool2d.out", TORCH_FN(functionalization::_adaptive_avg_pool2d_out_out));
  m.impl("_adaptive_avg_pool2d_backward.out", TORCH_FN(functionalization::_adaptive_avg_pool2d_backward_out_out));
  m.impl("adaptive_avg_pool3d.out", TORCH_FN(functionalization::adaptive_avg_pool3d_out_out));
  m.impl("_adaptive_avg_pool3d.out", TORCH_FN(functionalization::_adaptive_avg_pool3d_out_out));
  m.impl("_adaptive_avg_pool3d_backward.out", TORCH_FN(functionalization::_adaptive_avg_pool3d_backward_out_out));
  m.impl("adaptive_max_pool2d.out", TORCH_FN(functionalization::adaptive_max_pool2d_out_out));
  m.impl("adaptive_max_pool2d_backward.grad_input", TORCH_FN(functionalization::adaptive_max_pool2d_backward_out_grad_input));
  m.impl("adaptive_max_pool3d.out", TORCH_FN(functionalization::adaptive_max_pool3d_out_out));
  m.impl("adaptive_max_pool3d_backward.grad_input", TORCH_FN(functionalization::adaptive_max_pool3d_backward_out_grad_input));
  m.impl("avg_pool2d.out", TORCH_FN(functionalization::avg_pool2d_out_out));
  m.impl("avg_pool2d_backward.grad_input", TORCH_FN(functionalization::avg_pool2d_backward_out_grad_input));
  m.impl("avg_pool3d.out", TORCH_FN(functionalization::avg_pool3d_out_out));
  m.impl("avg_pool3d_backward.grad_input", TORCH_FN(functionalization::avg_pool3d_backward_out_grad_input));
  m.impl("fractional_max_pool2d.output", TORCH_FN(functionalization::fractional_max_pool2d_out_output));
  m.impl("fractional_max_pool2d_backward.grad_input", TORCH_FN(functionalization::fractional_max_pool2d_backward_out_grad_input));
  m.impl("fractional_max_pool3d.output", TORCH_FN(functionalization::fractional_max_pool3d_out_output));
  m.impl("fractional_max_pool3d_backward.grad_input", TORCH_FN(functionalization::fractional_max_pool3d_backward_out_grad_input));
  m.impl("max_pool2d_with_indices.out", TORCH_FN(functionalization::max_pool2d_with_indices_out_out));
  m.impl("max_pool2d_with_indices_backward.grad_input", TORCH_FN(functionalization::max_pool2d_with_indices_backward_out_grad_input));
  m.impl("max_pool3d_with_indices.out", TORCH_FN(functionalization::max_pool3d_with_indices_out_out));
  m.impl("max_pool3d_with_indices_backward.grad_input", TORCH_FN(functionalization::max_pool3d_with_indices_backward_out_grad_input));
  m.impl("max_unpool2d.out", TORCH_FN(functionalization::max_unpool2d_out_out));
  m.impl("max_unpool3d.out", TORCH_FN(functionalization::max_unpool3d_out_out));
  m.impl("reflection_pad1d.out", TORCH_FN(functionalization::reflection_pad1d_out_out));
  m.impl("reflection_pad1d_backward.grad_input", TORCH_FN(functionalization::reflection_pad1d_backward_out_grad_input));
  m.impl("reflection_pad2d.out", TORCH_FN(functionalization::reflection_pad2d_out_out));
  m.impl("reflection_pad2d_backward.grad_input", TORCH_FN(functionalization::reflection_pad2d_backward_out_grad_input));
  m.impl("reflection_pad3d.out", TORCH_FN(functionalization::reflection_pad3d_out_out));
  m.impl("reflection_pad3d_backward.grad_input", TORCH_FN(functionalization::reflection_pad3d_backward_out_grad_input));
  m.impl("replication_pad1d.out", TORCH_FN(functionalization::replication_pad1d_out_out));
  m.impl("replication_pad1d_backward.grad_input", TORCH_FN(functionalization::replication_pad1d_backward_out_grad_input));
  m.impl("replication_pad2d.out", TORCH_FN(functionalization::replication_pad2d_out_out));
  m.impl("replication_pad2d_backward.grad_input", TORCH_FN(functionalization::replication_pad2d_backward_out_grad_input));
  m.impl("replication_pad3d.out", TORCH_FN(functionalization::replication_pad3d_out_out));
  m.impl("replication_pad3d_backward.grad_input", TORCH_FN(functionalization::replication_pad3d_backward_out_grad_input));
  m.impl("upsample_bilinear2d.vec_out", TORCH_FN(functionalization::upsample_bilinear2d_out_vec_out));
  m.impl("upsample_nearest2d.vec_out", TORCH_FN(functionalization::upsample_nearest2d_out_vec_out));
  m.impl("upsample_linear1d.out", TORCH_FN(functionalization::upsample_linear1d_out_out));
  m.impl("upsample_linear1d_backward.grad_input", TORCH_FN(functionalization::upsample_linear1d_backward_out_grad_input));
  m.impl("upsample_bilinear2d.out", TORCH_FN(functionalization::upsample_bilinear2d_out_out));
  m.impl("upsample_bilinear2d_backward.grad_input", TORCH_FN(functionalization::upsample_bilinear2d_backward_out_grad_input));
  m.impl("_upsample_bilinear2d_aa.out", TORCH_FN(functionalization::_upsample_bilinear2d_aa_out_out));
  m.impl("_upsample_bilinear2d_aa_backward.grad_input", TORCH_FN(functionalization::_upsample_bilinear2d_aa_backward_out_grad_input));
  m.impl("upsample_bicubic2d.out", TORCH_FN(functionalization::upsample_bicubic2d_out_out));
  m.impl("upsample_bicubic2d_backward.grad_input", TORCH_FN(functionalization::upsample_bicubic2d_backward_out_grad_input));
  m.impl("_upsample_bicubic2d_aa.out", TORCH_FN(functionalization::_upsample_bicubic2d_aa_out_out));
  m.impl("_upsample_bicubic2d_aa_backward.grad_input", TORCH_FN(functionalization::_upsample_bicubic2d_aa_backward_out_grad_input));
  m.impl("upsample_trilinear3d.out", TORCH_FN(functionalization::upsample_trilinear3d_out_out));
  m.impl("upsample_trilinear3d_backward.grad_input", TORCH_FN(functionalization::upsample_trilinear3d_backward_out_grad_input));
  m.impl("upsample_nearest1d.out", TORCH_FN(functionalization::upsample_nearest1d_out_out));
  m.impl("_upsample_nearest_exact1d.out", TORCH_FN(functionalization::_upsample_nearest_exact1d_out_out));
  m.impl("upsample_nearest1d_backward.grad_input", TORCH_FN(functionalization::upsample_nearest1d_backward_out_grad_input));
  m.impl("_upsample_nearest_exact1d_backward.grad_input", TORCH_FN(functionalization::_upsample_nearest_exact1d_backward_out_grad_input));
  m.impl("upsample_nearest2d.out", TORCH_FN(functionalization::upsample_nearest2d_out_out));
  m.impl("_upsample_nearest_exact2d.out", TORCH_FN(functionalization::_upsample_nearest_exact2d_out_out));
  m.impl("upsample_nearest2d_backward.grad_input", TORCH_FN(functionalization::upsample_nearest2d_backward_out_grad_input));
  m.impl("_upsample_nearest_exact2d_backward.grad_input", TORCH_FN(functionalization::_upsample_nearest_exact2d_backward_out_grad_input));
  m.impl("upsample_nearest3d.out", TORCH_FN(functionalization::upsample_nearest3d_out_out));
  m.impl("_upsample_nearest_exact3d.out", TORCH_FN(functionalization::_upsample_nearest_exact3d_out_out));
  m.impl("upsample_nearest3d_backward.grad_input", TORCH_FN(functionalization::upsample_nearest3d_backward_out_grad_input));
  m.impl("_upsample_nearest_exact3d_backward.grad_input", TORCH_FN(functionalization::_upsample_nearest_exact3d_backward_out_grad_input));
  m.impl("sigmoid_backward.grad_input", TORCH_FN(functionalization::sigmoid_backward_out_grad_input));
  m.impl("logit_backward.grad_input", TORCH_FN(functionalization::logit_backward_out_grad_input));
  m.impl("tanh_backward.grad_input", TORCH_FN(functionalization::tanh_backward_out_grad_input));
  m.impl("slow_conv_transpose2d.out", TORCH_FN(functionalization::slow_conv_transpose2d_out_out));
  m.impl("slow_conv_transpose3d.out", TORCH_FN(functionalization::slow_conv_transpose3d_out_out));
  m.impl("_slow_conv2d_forward.output", TORCH_FN(functionalization::_slow_conv2d_forward_out_output));
  m.impl("_slow_conv2d_backward.output_mask_out", TORCH_FN(functionalization::_slow_conv2d_backward_out_output_mask_out));
  m.impl("_conv_depthwise2d.out", TORCH_FN(functionalization::_conv_depthwise2d_out_out));
  m.impl("conv_depthwise3d.out", TORCH_FN(functionalization::conv_depthwise3d_out_out));
  m.impl("slow_conv3d_forward.output", TORCH_FN(functionalization::slow_conv3d_forward_out_output));
  m.impl("slow_conv_dilated2d.out", TORCH_FN(functionalization::slow_conv_dilated2d_out_out));
  m.impl("slow_conv_dilated3d.out", TORCH_FN(functionalization::slow_conv_dilated3d_out_out));
  m.impl("col2im.out", TORCH_FN(functionalization::col2im_out_out));
  m.impl("im2col.out", TORCH_FN(functionalization::im2col_out_out));
  m.impl("isinf.out", TORCH_FN(functionalization::isinf_out_out));
  m.impl("isposinf.out", TORCH_FN(functionalization::isposinf_out_out));
  m.impl("isneginf.out", TORCH_FN(functionalization::isneginf_out_out));
  m.impl("special_entr.out", TORCH_FN(functionalization::special_entr_out_out));
  m.impl("special_ndtri.out", TORCH_FN(functionalization::special_ndtri_out_out));
  m.impl("special_log_ndtr.out", TORCH_FN(functionalization::special_log_ndtr_out_out));
  m.impl("special_erfcx.out", TORCH_FN(functionalization::special_erfcx_out_out));
  m.impl("special_xlog1py.out", TORCH_FN(functionalization::special_xlog1py_out_out));
  m.impl("special_xlog1py.self_scalar_out", TORCH_FN(functionalization::special_xlog1py_out_self_scalar_out));
  m.impl("special_xlog1py.other_scalar_out", TORCH_FN(functionalization::special_xlog1py_out_other_scalar_out));
  m.impl("special_zeta.out", TORCH_FN(functionalization::special_zeta_out_out));
  m.impl("special_zeta.self_scalar_out", TORCH_FN(functionalization::special_zeta_out_self_scalar_out));
  m.impl("special_zeta.other_scalar_out", TORCH_FN(functionalization::special_zeta_out_other_scalar_out));
  m.impl("special_i0e.out", TORCH_FN(functionalization::special_i0e_out_out));
  m.impl("special_i1.out", TORCH_FN(functionalization::special_i1_out_out));
  m.impl("special_i1e.out", TORCH_FN(functionalization::special_i1e_out_out));
  m.impl("fft_fftfreq.out", TORCH_FN(functionalization::fft_fftfreq_out_out));
  m.impl("fft_rfftfreq.out", TORCH_FN(functionalization::fft_rfftfreq_out_out));
  m.impl("linalg_cholesky_ex.L", TORCH_FN(functionalization::linalg_cholesky_ex_out_L));
  m.impl("linalg_cross.out", TORCH_FN(functionalization::linalg_cross_out_out));
  m.impl("linalg_lu_factor_ex.out", TORCH_FN(functionalization::linalg_lu_factor_ex_out_out));
  m.impl("linalg_lu.out", TORCH_FN(functionalization::linalg_lu_out_out));
  m.impl("linalg_lu_solve.out", TORCH_FN(functionalization::linalg_lu_solve_out_out));
  m.impl("_linalg_det.result", TORCH_FN(functionalization::_linalg_det_out_result));
  m.impl("linalg_ldl_factor_ex.out", TORCH_FN(functionalization::linalg_ldl_factor_ex_out_out));
  m.impl("linalg_ldl_solve.out", TORCH_FN(functionalization::linalg_ldl_solve_out_out));
  m.impl("linalg_lstsq.out", TORCH_FN(functionalization::linalg_lstsq_out_out));
  m.impl("linalg_matrix_exp.out", TORCH_FN(functionalization::linalg_matrix_exp_out_out));
  m.impl("_linalg_slogdet.sign", TORCH_FN(functionalization::_linalg_slogdet_out_sign));
  m.impl("linalg_eig.out", TORCH_FN(functionalization::linalg_eig_out_out));
  m.impl("linalg_eigvals.out", TORCH_FN(functionalization::linalg_eigvals_out_out));
  m.impl("_linalg_eigh.eigenvalues", TORCH_FN(functionalization::_linalg_eigh_out_eigenvalues));
  m.impl("linalg_householder_product.out", TORCH_FN(functionalization::linalg_householder_product_out_out));
  m.impl("linalg_inv_ex.inverse", TORCH_FN(functionalization::linalg_inv_ex_out_inverse));
  m.impl("linalg_vector_norm.out", TORCH_FN(functionalization::linalg_vector_norm_out_out));
  m.impl("_linalg_svd.U", TORCH_FN(functionalization::_linalg_svd_out_U));
  m.impl("linalg_pinv.atol_rtol_tensor_out", TORCH_FN(functionalization::linalg_pinv_out_atol_rtol_tensor_out));
  m.impl("_linalg_solve_ex.result", TORCH_FN(functionalization::_linalg_solve_ex_out_result));
  m.impl("linalg_qr.out", TORCH_FN(functionalization::linalg_qr_out_out));
  m.impl("_test_optional_intlist.out", TORCH_FN(functionalization::_test_optional_intlist_out_out));
  m.impl("_test_optional_filled_intlist.out", TORCH_FN(functionalization::_test_optional_filled_intlist_out_out));
  m.impl("_test_optional_floatlist.out", TORCH_FN(functionalization::_test_optional_floatlist_out_out));
  m.impl("_test_warn_in_autograd.out", TORCH_FN(functionalization::_test_warn_in_autograd_out_out));
  m.impl("_test_autograd_multiple_dispatch.fullcoverage_out", TORCH_FN(functionalization::_test_autograd_multiple_dispatch_out_fullcoverage_out));
  m.impl("_test_autograd_multiple_dispatch_view_copy.out", TORCH_FN(functionalization::_test_autograd_multiple_dispatch_view_copy_out_out));
  m.impl("segment_reduce.out", TORCH_FN(functionalization::segment_reduce_out_out));
  m.impl("_segment_reduce_backward.out", TORCH_FN(functionalization::_segment_reduce_backward_out_out));
  m.impl("_nested_tensor_from_tensor_list.out", TORCH_FN(functionalization::_nested_tensor_from_tensor_list_out_out));
  m.impl("_fw_primal_copy.out", TORCH_FN(functionalization::_fw_primal_copy_out_out));
  m.impl("_make_dual_copy.out", TORCH_FN(functionalization::_make_dual_copy_out_out));
  m.impl("view_as_real_copy.out", TORCH_FN(functionalization::view_as_real_copy_out_out));
  m.impl("view_as_complex_copy.out", TORCH_FN(functionalization::view_as_complex_copy_out_out));
  m.impl("_conj_copy.out", TORCH_FN(functionalization::_conj_copy_out_out));
  m.impl("_neg_view_copy.out", TORCH_FN(functionalization::_neg_view_copy_out_out));
  m.impl("as_strided_copy.out", TORCH_FN(functionalization::as_strided_copy_out_out));
  m.impl("_sparse_broadcast_to_copy.out", TORCH_FN(functionalization::_sparse_broadcast_to_copy_out_out));
  m.impl("diagonal_copy.out", TORCH_FN(functionalization::diagonal_copy_out_out));
  m.impl("expand_copy.out", TORCH_FN(functionalization::expand_copy_out_out));
  m.impl("permute_copy.out", TORCH_FN(functionalization::permute_copy_out_out));
  m.impl("_reshape_alias_copy.out", TORCH_FN(functionalization::_reshape_alias_copy_out_out));
  m.impl("select_copy.int_out", TORCH_FN(functionalization::select_copy_out_int_out));
  m.impl("detach_copy.out", TORCH_FN(functionalization::detach_copy_out_out));
  m.impl("slice_copy.Tensor_out", TORCH_FN(functionalization::slice_copy_out_Tensor_out));
  m.impl("split_copy.Tensor_out", TORCH_FN(functionalization::split_copy_out_Tensor_out));
  m.impl("split_with_sizes_copy.out", TORCH_FN(functionalization::split_with_sizes_copy_out_out));
  m.impl("squeeze_copy.out", TORCH_FN(functionalization::squeeze_copy_out_out));
  m.impl("squeeze_copy.dim_out", TORCH_FN(functionalization::squeeze_copy_out_dim_out));
  m.impl("squeeze_copy.dims_out", TORCH_FN(functionalization::squeeze_copy_out_dims_out));
  m.impl("t_copy.out", TORCH_FN(functionalization::t_copy_out_out));
  m.impl("transpose_copy.int_out", TORCH_FN(functionalization::transpose_copy_out_int_out));
  m.impl("unsqueeze_copy.out", TORCH_FN(functionalization::unsqueeze_copy_out_out));
  m.impl("_indices_copy.out", TORCH_FN(functionalization::_indices_copy_out_out));
  m.impl("_values_copy.out", TORCH_FN(functionalization::_values_copy_out_out));
  m.impl("indices_copy.out", TORCH_FN(functionalization::indices_copy_out_out));
  m.impl("values_copy.out", TORCH_FN(functionalization::values_copy_out_out));
  m.impl("crow_indices_copy.out", TORCH_FN(functionalization::crow_indices_copy_out_out));
  m.impl("col_indices_copy.out", TORCH_FN(functionalization::col_indices_copy_out_out));
  m.impl("ccol_indices_copy.out", TORCH_FN(functionalization::ccol_indices_copy_out_out));
  m.impl("row_indices_copy.out", TORCH_FN(functionalization::row_indices_copy_out_out));
  m.impl("unbind_copy.int_out", TORCH_FN(functionalization::unbind_copy_out_int_out));
  m.impl("view_copy.out", TORCH_FN(functionalization::view_copy_out_out));
  m.impl("view_copy.dtype_out", TORCH_FN(functionalization::view_copy_out_dtype_out));
  m.impl("unfold_copy.out", TORCH_FN(functionalization::unfold_copy_out_out));
  m.impl("alias_copy.out", TORCH_FN(functionalization::alias_copy_out_out));
  m.impl("to_padded_tensor.out", TORCH_FN(functionalization::to_padded_tensor_out_out));
  m.impl("_transformer_encoder_layer_fwd.out", TORCH_FN(functionalization::_transformer_encoder_layer_fwd_out_out));
  m.impl("_native_multi_head_attention.out", TORCH_FN(functionalization::_native_multi_head_attention_out_out));
  m.impl("_triton_scaled_dot_attention.out", TORCH_FN(functionalization::_triton_scaled_dot_attention_out_out));
  m.impl("_triton_multi_head_attention.out", TORCH_FN(functionalization::_triton_multi_head_attention_out_out));
  m.impl("special_airy_ai.out", TORCH_FN(functionalization::special_airy_ai_out_out));
  m.impl("special_bessel_j0.out", TORCH_FN(functionalization::special_bessel_j0_out_out));
  m.impl("special_bessel_j1.out", TORCH_FN(functionalization::special_bessel_j1_out_out));
  m.impl("special_bessel_y0.out", TORCH_FN(functionalization::special_bessel_y0_out_out));
  m.impl("special_bessel_y1.out", TORCH_FN(functionalization::special_bessel_y1_out_out));
  m.impl("special_chebyshev_polynomial_t.out", TORCH_FN(functionalization::special_chebyshev_polynomial_t_out_out));
  m.impl("special_chebyshev_polynomial_t.x_scalar_out", TORCH_FN(functionalization::special_chebyshev_polynomial_t_out_x_scalar_out));
  m.impl("special_chebyshev_polynomial_t.n_scalar_out", TORCH_FN(functionalization::special_chebyshev_polynomial_t_out_n_scalar_out));
  m.impl("special_chebyshev_polynomial_u.out", TORCH_FN(functionalization::special_chebyshev_polynomial_u_out_out));
  m.impl("special_chebyshev_polynomial_u.x_scalar_out", TORCH_FN(functionalization::special_chebyshev_polynomial_u_out_x_scalar_out));
  m.impl("special_chebyshev_polynomial_u.n_scalar_out", TORCH_FN(functionalization::special_chebyshev_polynomial_u_out_n_scalar_out));
  m.impl("special_chebyshev_polynomial_v.out", TORCH_FN(functionalization::special_chebyshev_polynomial_v_out_out));
  m.impl("special_chebyshev_polynomial_v.x_scalar_out", TORCH_FN(functionalization::special_chebyshev_polynomial_v_out_x_scalar_out));
  m.impl("special_chebyshev_polynomial_v.n_scalar_out", TORCH_FN(functionalization::special_chebyshev_polynomial_v_out_n_scalar_out));
  m.impl("special_chebyshev_polynomial_w.out", TORCH_FN(functionalization::special_chebyshev_polynomial_w_out_out));
  m.impl("special_chebyshev_polynomial_w.x_scalar_out", TORCH_FN(functionalization::special_chebyshev_polynomial_w_out_x_scalar_out));
  m.impl("special_chebyshev_polynomial_w.n_scalar_out", TORCH_FN(functionalization::special_chebyshev_polynomial_w_out_n_scalar_out));
  m.impl("special_hermite_polynomial_h.out", TORCH_FN(functionalization::special_hermite_polynomial_h_out_out));
  m.impl("special_hermite_polynomial_h.x_scalar_out", TORCH_FN(functionalization::special_hermite_polynomial_h_out_x_scalar_out));
  m.impl("special_hermite_polynomial_h.n_scalar_out", TORCH_FN(functionalization::special_hermite_polynomial_h_out_n_scalar_out));
  m.impl("special_hermite_polynomial_he.out", TORCH_FN(functionalization::special_hermite_polynomial_he_out_out));
  m.impl("special_hermite_polynomial_he.x_scalar_out", TORCH_FN(functionalization::special_hermite_polynomial_he_out_x_scalar_out));
  m.impl("special_hermite_polynomial_he.n_scalar_out", TORCH_FN(functionalization::special_hermite_polynomial_he_out_n_scalar_out));
  m.impl("special_laguerre_polynomial_l.out", TORCH_FN(functionalization::special_laguerre_polynomial_l_out_out));
  m.impl("special_laguerre_polynomial_l.x_scalar_out", TORCH_FN(functionalization::special_laguerre_polynomial_l_out_x_scalar_out));
  m.impl("special_laguerre_polynomial_l.n_scalar_out", TORCH_FN(functionalization::special_laguerre_polynomial_l_out_n_scalar_out));
  m.impl("special_legendre_polynomial_p.out", TORCH_FN(functionalization::special_legendre_polynomial_p_out_out));
  m.impl("special_legendre_polynomial_p.x_scalar_out", TORCH_FN(functionalization::special_legendre_polynomial_p_out_x_scalar_out));
  m.impl("special_legendre_polynomial_p.n_scalar_out", TORCH_FN(functionalization::special_legendre_polynomial_p_out_n_scalar_out));
  m.impl("special_modified_bessel_i0.out", TORCH_FN(functionalization::special_modified_bessel_i0_out_out));
  m.impl("special_modified_bessel_i1.out", TORCH_FN(functionalization::special_modified_bessel_i1_out_out));
  m.impl("special_modified_bessel_k0.out", TORCH_FN(functionalization::special_modified_bessel_k0_out_out));
  m.impl("special_modified_bessel_k1.out", TORCH_FN(functionalization::special_modified_bessel_k1_out_out));
  m.impl("special_scaled_modified_bessel_k0.out", TORCH_FN(functionalization::special_scaled_modified_bessel_k0_out_out));
  m.impl("special_scaled_modified_bessel_k1.out", TORCH_FN(functionalization::special_scaled_modified_bessel_k1_out_out));
  m.impl("special_shifted_chebyshev_polynomial_t.out", TORCH_FN(functionalization::special_shifted_chebyshev_polynomial_t_out_out));
  m.impl("special_shifted_chebyshev_polynomial_t.x_scalar_out", TORCH_FN(functionalization::special_shifted_chebyshev_polynomial_t_out_x_scalar_out));
  m.impl("special_shifted_chebyshev_polynomial_t.n_scalar_out", TORCH_FN(functionalization::special_shifted_chebyshev_polynomial_t_out_n_scalar_out));
  m.impl("special_shifted_chebyshev_polynomial_u.out", TORCH_FN(functionalization::special_shifted_chebyshev_polynomial_u_out_out));
  m.impl("special_shifted_chebyshev_polynomial_u.x_scalar_out", TORCH_FN(functionalization::special_shifted_chebyshev_polynomial_u_out_x_scalar_out));
  m.impl("special_shifted_chebyshev_polynomial_u.n_scalar_out", TORCH_FN(functionalization::special_shifted_chebyshev_polynomial_u_out_n_scalar_out));
  m.impl("special_shifted_chebyshev_polynomial_v.out", TORCH_FN(functionalization::special_shifted_chebyshev_polynomial_v_out_out));
  m.impl("special_shifted_chebyshev_polynomial_v.x_scalar_out", TORCH_FN(functionalization::special_shifted_chebyshev_polynomial_v_out_x_scalar_out));
  m.impl("special_shifted_chebyshev_polynomial_v.n_scalar_out", TORCH_FN(functionalization::special_shifted_chebyshev_polynomial_v_out_n_scalar_out));
  m.impl("special_shifted_chebyshev_polynomial_w.out", TORCH_FN(functionalization::special_shifted_chebyshev_polynomial_w_out_out));
  m.impl("special_shifted_chebyshev_polynomial_w.x_scalar_out", TORCH_FN(functionalization::special_shifted_chebyshev_polynomial_w_out_x_scalar_out));
  m.impl("special_shifted_chebyshev_polynomial_w.n_scalar_out", TORCH_FN(functionalization::special_shifted_chebyshev_polynomial_w_out_n_scalar_out));
  m.impl("special_spherical_bessel_j0.out", TORCH_FN(functionalization::special_spherical_bessel_j0_out_out));
  m.impl("_foobar.out", TORCH_FN(functionalization::_foobar_out_out));
  m.impl("_fused_adam.out", TORCH_FN(functionalization::_fused_adam_out_out));
  m.impl("_fused_adam_", TORCH_FN(functionalization::_fused_adam_));
  m.impl("_fused_adam.tensor_lr_out", TORCH_FN(functionalization::_fused_adam_out_tensor_lr_out));
  m.impl("_fused_adam_.tensor_lr", TORCH_FN(functionalization::_fused_adam__tensor_lr));
  m.impl("_fused_adamw.out", TORCH_FN(functionalization::_fused_adamw_out_out));
  m.impl("_fused_adamw_", TORCH_FN(functionalization::_fused_adamw_));
  m.impl("_fused_adamw.tensor_lr_out", TORCH_FN(functionalization::_fused_adamw_out_tensor_lr_out));
  m.impl("_fused_adamw_.tensor_lr", TORCH_FN(functionalization::_fused_adamw__tensor_lr));
  m.impl("_fused_sgd.out", TORCH_FN(functionalization::_fused_sgd_out_out));
  m.impl("_fused_sgd_", TORCH_FN(functionalization::_fused_sgd_));
  m.impl("_fused_sgd.tensor_lr_out", TORCH_FN(functionalization::_fused_sgd_out_tensor_lr_out));
  m.impl("_fused_sgd_.tensor_lr", TORCH_FN(functionalization::_fused_sgd__tensor_lr));
  m.impl("_fused_adagrad.out", TORCH_FN(functionalization::_fused_adagrad_out_out));
  m.impl("_fused_adagrad_", TORCH_FN(functionalization::_fused_adagrad_));
  m.impl("_fw_primal", TORCH_FN(functionalization::_fw_primal));
  m.impl("_make_dual", TORCH_FN(functionalization::_make_dual));
  m.impl("view_as_real", TORCH_FN(functionalization::view_as_real));
  m.impl("view_as_complex", TORCH_FN(functionalization::view_as_complex));
  m.impl("_conj", TORCH_FN(functionalization::_conj));
  m.impl("_neg_view", TORCH_FN(functionalization::_neg_view));
  m.impl("as_strided", TORCH_FN(functionalization::as_strided));
  m.impl("as_strided_", TORCH_FN(functionalization::as_strided_));
  m.impl("_sparse_broadcast_to", TORCH_FN(functionalization::_sparse_broadcast_to));
  m.impl("diagonal", TORCH_FN(functionalization::diagonal));
  m.impl("expand", TORCH_FN(functionalization::expand));
  m.impl("permute", TORCH_FN(functionalization::permute));
  m.impl("_reshape_alias", TORCH_FN(functionalization::_reshape_alias));
  m.impl("select.int", TORCH_FN(functionalization::select_int));
  m.impl("detach", TORCH_FN(functionalization::detach));
  m.impl("detach_", TORCH_FN(functionalization::detach_));
  m.impl("slice.Tensor", TORCH_FN(functionalization::slice_Tensor));
  m.impl("slice_inverse", TORCH_FN(functionalization::slice_inverse));
  m.impl("split.Tensor", TORCH_FN(functionalization::split_Tensor));
  m.impl("split_with_sizes", TORCH_FN(functionalization::split_with_sizes));
  m.impl("squeeze", TORCH_FN(functionalization::squeeze));
  m.impl("squeeze_", TORCH_FN(functionalization::squeeze_));
  m.impl("squeeze.dim", TORCH_FN(functionalization::squeeze_dim));
  m.impl("squeeze_.dim", TORCH_FN(functionalization::squeeze__dim));
  m.impl("squeeze.dims", TORCH_FN(functionalization::squeeze_dims));
  m.impl("squeeze_.dims", TORCH_FN(functionalization::squeeze__dims));
  m.impl("t", TORCH_FN(functionalization::t));
  m.impl("t_", TORCH_FN(functionalization::t_));
  m.impl("transpose.int", TORCH_FN(functionalization::transpose_int));
  m.impl("transpose_", TORCH_FN(functionalization::transpose_));
  m.impl("_nested_view_from_buffer", TORCH_FN(functionalization::_nested_view_from_buffer));
  m.impl("_nested_view_from_jagged", TORCH_FN(functionalization::_nested_view_from_jagged));
  m.impl("_nested_get_values", TORCH_FN(functionalization::_nested_get_values));
  m.impl("unsqueeze", TORCH_FN(functionalization::unsqueeze));
  m.impl("unsqueeze_", TORCH_FN(functionalization::unsqueeze_));
  m.impl("_indices", TORCH_FN(functionalization::_indices));
  m.impl("_values", TORCH_FN(functionalization::_values));
  m.impl("indices", TORCH_FN(functionalization::indices));
  m.impl("values", TORCH_FN(functionalization::values));
  m.impl("crow_indices", TORCH_FN(functionalization::crow_indices));
  m.impl("col_indices", TORCH_FN(functionalization::col_indices));
  m.impl("ccol_indices", TORCH_FN(functionalization::ccol_indices));
  m.impl("row_indices", TORCH_FN(functionalization::row_indices));
  m.impl("unbind.int", TORCH_FN(functionalization::unbind_int));
  m.impl("view", TORCH_FN(functionalization::view));
  m.impl("view.dtype", TORCH_FN(functionalization::view_dtype));
  m.impl("unfold", TORCH_FN(functionalization::unfold));
  m.impl("alias", TORCH_FN(functionalization::alias));
  m.impl("_test_autograd_multiple_dispatch_view", TORCH_FN(functionalization::_test_autograd_multiple_dispatch_view));;
}

}  // namespace

} // namespace at
